1 /* $NetBSD: esiop.c,v 1.47 2009/05/16 03:57:57 tsutsui Exp $ */
3 /*
4 * Copyright (c) 2002 Manuel Bouyer.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: esiop.c,v 1.47 2009/05/16 03:57:57 tsutsui Exp $");
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
45 #include <uvm/uvm_extern.h>
47 #include <machine/endian.h>
48 #include <sys/bus.h>
50 #include <dev/microcode/siop/esiop.out>
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsiconf.h>
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/esiopvar.h>
62 #include "opt_siop.h"
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 /*
68 #define SIOP_DEBUG
69 #define SIOP_DEBUG_DR
70 #define SIOP_DEBUG_INTR
71 #define SIOP_DEBUG_SCHED
72 #define DUMP_SCRIPT
73 */
75 #define SIOP_STATS
77 #ifndef SIOP_DEFAULT_TARGET
78 #define SIOP_DEFAULT_TARGET 7
79 #endif
81 /* number of cmd descriptors per block */
82 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct esiop_xfer))
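/*
 * esiop_morecbd() allocates command descriptors one page at a time,
 * so this is just how many struct esiop_xfer fit in a page.
 */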
84 void esiop_reset(struct esiop_softc *);
85 void esiop_checkdone(struct esiop_softc *);
86 void esiop_handle_reset(struct esiop_softc *);
87 void esiop_scsicmd_end(struct esiop_cmd *, int);
88 void esiop_unqueue(struct esiop_softc *, int, int);
89 int esiop_handle_qtag_reject(struct esiop_cmd *);
90 static void esiop_start(struct esiop_softc *, struct esiop_cmd *);
91 void esiop_timeout(void *);
92 void esiop_scsipi_request(struct scsipi_channel *,
93 scsipi_adapter_req_t, void *);
94 void esiop_dump_script(struct esiop_softc *);
95 void esiop_morecbd(struct esiop_softc *);
96 void esiop_moretagtbl(struct esiop_softc *);
97 void siop_add_reselsw(struct esiop_softc *, int);
98 void esiop_target_register(struct esiop_softc *, uint32_t);
100 void esiop_update_scntl3(struct esiop_softc *, struct siop_common_target *);
102 #ifdef SIOP_STATS
103 static int esiop_stat_intr = 0;
104 static int esiop_stat_intr_shortxfer = 0;
105 static int esiop_stat_intr_sdp = 0;
106 static int esiop_stat_intr_done = 0;
107 static int esiop_stat_intr_xferdisc = 0;
108 static int esiop_stat_intr_lunresel = 0;
109 static int esiop_stat_intr_qfull = 0;
110 void esiop_printstats(void);
111 #define INCSTAT(x) x++
112 #else
113 #define INCSTAT(x)
114 #endif
116 static inline void esiop_script_sync(struct esiop_softc *, int);
117 static inline void
118 esiop_script_sync(struct esiop_softc *sc, int ops)
121 if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
122 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
123 PAGE_SIZE, ops);
126 static inline uint32_t esiop_script_read(struct esiop_softc *, u_int);
127 static inline uint32_t
128 esiop_script_read(struct esiop_softc *sc, u_int offset)
131 if (sc->sc_c.features & SF_CHIP_RAM) {
132 return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
133 offset * 4);
134 } else {
135 return le32toh(sc->sc_c.sc_script[offset]);
139 static inline void esiop_script_write(struct esiop_softc *, u_int,
140 uint32_t);
141 static inline void
142 esiop_script_write(struct esiop_softc *sc, u_int offset, uint32_t val)
145 if (sc->sc_c.features & SF_CHIP_RAM) {
146 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
147 offset * 4, val);
148 } else {
149 sc->sc_c.sc_script[offset] = htole32(val);
153 void
154 esiop_attach(struct esiop_softc *sc)
156 struct esiop_dsatbl *tagtbl_donering;
158 if (siop_common_attach(&sc->sc_c) != 0 )
159 return;
161 TAILQ_INIT(&sc->free_list);
162 TAILQ_INIT(&sc->cmds);
163 TAILQ_INIT(&sc->free_tagtbl);
164 TAILQ_INIT(&sc->tag_tblblk);
165 sc->sc_currschedslot = 0;
166 #ifdef SIOP_DEBUG
167 aprint_debug_dev(sc->sc_c.sc_dev,
168 "script size = %d, PHY addr=0x%x, VIRT=%p\n",
169 (int)sizeof(esiop_script),
170 (uint32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
171 #endif
173 sc->sc_c.sc_adapt.adapt_max_periph = ESIOP_NTAG;
174 sc->sc_c.sc_adapt.adapt_request = esiop_scsipi_request;
177 * get space for the CMD done slot. For this we use a tag table entry.
178 * It's the same size and allows us to not waste 3/4 of a page
180 #ifdef DIAGNOSTIC
181 if (ESIOP_NTAG != A_ndone_slots) {
182 aprint_error_dev(sc->sc_c.sc_dev,
183 "size of tag DSA table different from the done ring\n");
184 return;
186 #endif
187 esiop_moretagtbl(sc);
188 tagtbl_donering = TAILQ_FIRST(&sc->free_tagtbl);
189 if (tagtbl_donering == NULL) {
190 aprint_error_dev(sc->sc_c.sc_dev,
191 "no memory for command done ring\n");
192 return;
194 TAILQ_REMOVE(&sc->free_tagtbl, tagtbl_donering, next);
195 sc->sc_done_map = tagtbl_donering->tblblk->blkmap;
196 sc->sc_done_offset = tagtbl_donering->tbl_offset;
197 sc->sc_done_slot = &tagtbl_donering->tbl[0];
199 /* Do a bus reset, so that devices fall back to narrow/async */
200 siop_resetbus(&sc->sc_c);
202 * esiop_reset() will reset the chip, thus clearing pending interrupts
204 esiop_reset(sc);
205 #ifdef DUMP_SCRIPT
206 esiop_dump_script(sc);
207 #endif
209 config_found(sc->sc_c.sc_dev, &sc->sc_c.sc_chan, scsiprint);
212 void
213 esiop_reset(struct esiop_softc *sc)
215 int i, j;
216 uint32_t addr;
217 uint32_t msgin_addr, sem_addr;
219 siop_common_reset(&sc->sc_c);
222 * we copy the script at the beginning of RAM. Then there are 4 bytes
223 * for messages in, and 4 bytes for the semaphore
225 sc->sc_free_offset = __arraycount(esiop_script);
226 msgin_addr =
227 sc->sc_free_offset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
228 sc->sc_free_offset += 1;
229 sc->sc_semoffset = sc->sc_free_offset;
230 sem_addr =
231 sc->sc_semoffset * sizeof(uint32_t) + sc->sc_c.sc_scriptaddr;
232 sc->sc_free_offset += 1;
233 /* then we have the scheduler ring */
234 sc->sc_shedoffset = sc->sc_free_offset;
235 sc->sc_free_offset += A_ncmd_slots * CMD_SLOTSIZE;
236 /* then the targets DSA table */
237 sc->sc_target_table_offset = sc->sc_free_offset;
238 sc->sc_free_offset += sc->sc_c.sc_chan.chan_ntargets;
239 /* copy and patch the script */
240 if (sc->sc_c.features & SF_CHIP_RAM) {
241 bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
242 esiop_script,
243 __arraycount(esiop_script));
244 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
245 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
246 E_tlq_offset_Used[j] * 4,
247 sizeof(struct siop_common_xfer));
249 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
250 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
251 E_saved_offset_offset_Used[j] * 4,
252 sizeof(struct siop_common_xfer) + 4);
254 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
255 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
256 E_abs_msgin2_Used[j] * 4, msgin_addr);
258 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
259 bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
260 E_abs_sem_Used[j] * 4, sem_addr);
263 if (sc->sc_c.features & SF_CHIP_LED0) {
264 bus_space_write_region_4(sc->sc_c.sc_ramt,
265 sc->sc_c.sc_ramh,
266 Ent_led_on1, esiop_led_on,
267 __arraycount(esiop_led_on));
268 bus_space_write_region_4(sc->sc_c.sc_ramt,
269 sc->sc_c.sc_ramh,
270 Ent_led_on2, esiop_led_on,
271 __arraycount(esiop_led_on));
272 bus_space_write_region_4(sc->sc_c.sc_ramt,
273 sc->sc_c.sc_ramh,
274 Ent_led_off, esiop_led_off,
275 __arraycount(esiop_led_off));
277 } else {
278 for (j = 0; j < __arraycount(esiop_script); j++) {
279 sc->sc_c.sc_script[j] = htole32(esiop_script[j]);
281 for (j = 0; j < __arraycount(E_tlq_offset_Used); j++) {
282 sc->sc_c.sc_script[E_tlq_offset_Used[j]] =
283 htole32(sizeof(struct siop_common_xfer));
285 for (j = 0; j < __arraycount(E_saved_offset_offset_Used); j++) {
286 sc->sc_c.sc_script[E_saved_offset_offset_Used[j]] =
287 htole32(sizeof(struct siop_common_xfer) + 4);
289 for (j = 0; j < __arraycount(E_abs_msgin2_Used); j++) {
290 sc->sc_c.sc_script[E_abs_msgin2_Used[j]] =
291 htole32(msgin_addr);
293 for (j = 0; j < __arraycount(E_abs_sem_Used); j++) {
294 sc->sc_c.sc_script[E_abs_sem_Used[j]] =
295 htole32(sem_addr);
298 if (sc->sc_c.features & SF_CHIP_LED0) {
299 for (j = 0; j < __arraycount(esiop_led_on); j++)
300 sc->sc_c.sc_script[
301 Ent_led_on1 / sizeof(esiop_led_on[0]) + j
302 ] = htole32(esiop_led_on[j]);
303 for (j = 0; j < __arraycount(esiop_led_on); j++)
304 sc->sc_c.sc_script[
305 Ent_led_on2 / sizeof(esiop_led_on[0]) + j
306 ] = htole32(esiop_led_on[j]);
307 for (j = 0; j < __arraycount(esiop_led_off); j++)
308 sc->sc_c.sc_script[
309 Ent_led_off / sizeof(esiop_led_off[0]) + j
310 ] = htole32(esiop_led_off[j]);
313 /* get base of scheduler ring */
314 addr = sc->sc_c.sc_scriptaddr + sc->sc_shedoffset * sizeof(uint32_t);
315 /* init scheduler */
316 for (i = 0; i < A_ncmd_slots; i++) {
317 esiop_script_write(sc,
318 sc->sc_shedoffset + i * CMD_SLOTSIZE, A_f_cmd_free);
320 sc->sc_currschedslot = 0;
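/*
 * The scheduler ring state is kept in scratch registers: SCRATCHE holds
 * the current slot index and SCRATCHD the bus address of the current
 * slot (presumably advanced by the script as it consumes slots). The
 * selection-timeout path in esiop_intr() reads and advances them by hand.
 */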
321 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
322 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHD, addr);
324 * 0x78000000 is a 'move data8 to reg'. data8 is the second
325 * octet, reg offset is the third.
327 esiop_script_write(sc, Ent_cmdr0 / 4,
328 0x78640000 | ((addr & 0x000000ff) << 8));
329 esiop_script_write(sc, Ent_cmdr1 / 4,
330 0x78650000 | ((addr & 0x0000ff00) ));
331 esiop_script_write(sc, Ent_cmdr2 / 4,
332 0x78660000 | ((addr & 0x00ff0000) >> 8));
333 esiop_script_write(sc, Ent_cmdr3 / 4,
334 0x78670000 | ((addr & 0xff000000) >> 16));
335 /* done ring */
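/*
 * Each completed command's T/L/Q word is (presumably by the script)
 * posted into this ring and later decoded by esiop_checkdone(), using
 * the same layout as the tflags word read from SCRATCHC in esiop_intr().
 */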
336 for (i = 0; i < A_ndone_slots; i++)
337 sc->sc_done_slot[i] = 0;
338 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
339 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
340 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
341 addr = sc->sc_done_map->dm_segs[0].ds_addr + sc->sc_done_offset;
342 sc->sc_currdoneslot = 0;
343 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHE + 2, 0);
344 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHF, addr);
345 esiop_script_write(sc, Ent_doner0 / 4,
346 0x786c0000 | ((addr & 0x000000ff) << 8));
347 esiop_script_write(sc, Ent_doner1 / 4,
348 0x786d0000 | ((addr & 0x0000ff00) ));
349 esiop_script_write(sc, Ent_doner2 / 4,
350 0x786e0000 | ((addr & 0x00ff0000) >> 8));
351 esiop_script_write(sc, Ent_doner3 / 4,
352 0x786f0000 | ((addr & 0xff000000) >> 16));
354 /* set flags */
355 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_SCRATCHC, 0);
356 /* write pointer of base of target DSA table */
357 addr = (sc->sc_target_table_offset * sizeof(uint32_t)) +
358 sc->sc_c.sc_scriptaddr;
359 esiop_script_write(sc, (Ent_load_targtable / 4) + 0,
360 esiop_script_read(sc,(Ent_load_targtable / 4) + 0) |
361 ((addr & 0x000000ff) << 8));
362 esiop_script_write(sc, (Ent_load_targtable / 4) + 2,
363 esiop_script_read(sc,(Ent_load_targtable / 4) + 2) |
364 ((addr & 0x0000ff00) ));
365 esiop_script_write(sc, (Ent_load_targtable / 4) + 4,
366 esiop_script_read(sc,(Ent_load_targtable / 4) + 4) |
367 ((addr & 0x00ff0000) >> 8));
368 esiop_script_write(sc, (Ent_load_targtable / 4) + 6,
369 esiop_script_read(sc,(Ent_load_targtable / 4) + 6) |
370 ((addr & 0xff000000) >> 16));
371 #ifdef SIOP_DEBUG
372 printf("%s: target table offset %d free offset %d\n",
373 device_xname(sc->sc_c.sc_dev), sc->sc_target_table_offset,
374 sc->sc_free_offset);
375 #endif
377 /* register existing targets */
378 for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
379 if (sc->sc_c.targets[i])
380 esiop_target_register(sc, i);
382 /* start script */
383 if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
384 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
385 PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
387 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
388 sc->sc_c.sc_scriptaddr + Ent_reselect);
391 #if 0
392 #define CALL_SCRIPT(ent) do { \
393 printf ("start script DSA 0x%lx DSP 0x%lx\n", \
394 esiop_cmd->cmd_c.dsa, \
395 sc->sc_c.sc_scriptaddr + ent); \
396 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
397 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
398 } while (/* CONSTCOND */0)
399 #else
400 #define CALL_SCRIPT(ent) do { \
401 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, \
402 SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
403 } while (/* CONSTCOND */0)
404 #endif
407 esiop_intr(void *v)
409 struct esiop_softc *sc = v;
410 struct esiop_target *esiop_target;
411 struct esiop_cmd *esiop_cmd;
412 struct esiop_lun *esiop_lun;
413 struct scsipi_xfer *xs;
414 int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
415 uint32_t irqcode;
416 int need_reset = 0;
417 int offset, target, lun, tag;
418 uint32_t tflags;
419 uint32_t addr;
420 int freetarget = 0;
421 int slot;
422 int retval = 0;
424 again:
425 istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
426 if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0) {
427 return retval;
429 retval = 1;
430 INCSTAT(esiop_stat_intr);
431 esiop_checkdone(sc);
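/*
 * ISTAT_INTF ("interrupt on the fly") is presumably raised by the script
 * when it posts completed commands to the done ring; these were just
 * collected by esiop_checkdone() above, so acknowledge the flag and
 * rescan ISTAT.
 */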
432 if (istat & ISTAT_INTF) {
433 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
434 SIOP_ISTAT, ISTAT_INTF);
435 goto again;
438 if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
439 (ISTAT_DIP | ISTAT_ABRT)) {
440 /* clear abort */
441 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
442 SIOP_ISTAT, 0);
445 /* get CMD from T/L/Q */
446 tflags = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
447 SIOP_SCRATCHC);
448 #ifdef SIOP_DEBUG_INTR
449 printf("interrupt, istat=0x%x tflags=0x%x "
450 "DSA=0x%x DSP=0x%lx\n", istat, tflags,
451 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
452 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
453 SIOP_DSP) -
454 sc->sc_c.sc_scriptaddr));
455 #endif
456 target = (tflags & A_f_c_target) ? ((tflags >> 8) & 0xff) : -1;
457 if (target >= sc->sc_c.sc_chan.chan_ntargets) target = -1;
458 lun = (tflags & A_f_c_lun) ? ((tflags >> 16) & 0xff) : -1;
459 if (lun >= sc->sc_c.sc_chan.chan_nluns) lun = -1;
460 tag = (tflags & A_f_c_tag) ? ((tflags >> 24) & 0xff) : -1;
462 if (target >= 0 && lun >= 0) {
463 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
464 if (esiop_target == NULL) {
465 printf("esiop_target (target %d) not valid\n", target);
466 goto none;
468 esiop_lun = esiop_target->esiop_lun[lun];
469 if (esiop_lun == NULL) {
470 printf("esiop_lun (target %d lun %d) not valid\n",
471 target, lun);
472 goto none;
474 esiop_cmd =
475 (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
476 if (esiop_cmd == NULL) {
477 printf("esiop_cmd (target %d lun %d tag %d)"
478 " not valid\n",
479 target, lun, tag);
480 goto none;
482 xs = esiop_cmd->cmd_c.xs;
483 #ifdef DIAGNOSTIC
484 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
485 printf("esiop_cmd (target %d lun %d) "
486 "not active (%d)\n", target, lun,
487 esiop_cmd->cmd_c.status);
488 goto none;
490 #endif
491 esiop_table_sync(esiop_cmd,
492 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
493 } else {
494 none:
495 xs = NULL;
496 esiop_target = NULL;
497 esiop_lun = NULL;
498 esiop_cmd = NULL;
500 if (istat & ISTAT_DIP) {
501 dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
502 SIOP_DSTAT);
503 if (dstat & DSTAT_ABRT) {
504 /* was probably generated by a bus reset IOCTL */
505 if ((dstat & DSTAT_DFE) == 0)
506 siop_clearfifo(&sc->sc_c);
507 goto reset;
509 if (dstat & DSTAT_SSI) {
510 printf("single step dsp 0x%08x dsa 0x08%x\n",
511 (int)(bus_space_read_4(sc->sc_c.sc_rt,
512 sc->sc_c.sc_rh, SIOP_DSP) -
513 sc->sc_c.sc_scriptaddr),
514 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
515 SIOP_DSA));
516 if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
517 (istat & ISTAT_SIP) == 0) {
518 bus_space_write_1(sc->sc_c.sc_rt,
519 sc->sc_c.sc_rh, SIOP_DCNTL,
520 bus_space_read_1(sc->sc_c.sc_rt,
521 sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
523 return 1;
526 if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
527 printf("%s: DMA IRQ:", device_xname(sc->sc_c.sc_dev));
528 if (dstat & DSTAT_IID)
529 printf(" Illegal instruction");
530 if (dstat & DSTAT_BF)
531 printf(" bus fault");
532 if (dstat & DSTAT_MDPE)
533 printf(" parity");
534 if (dstat & DSTAT_DFE)
535 printf(" DMA fifo empty");
536 else
537 siop_clearfifo(&sc->sc_c);
538 printf(", DSP=0x%x DSA=0x%x: ",
539 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
540 SIOP_DSP) - sc->sc_c.sc_scriptaddr),
541 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
542 if (esiop_cmd)
543 printf("T/L/Q=%d/%d/%d last msg_in=0x%x status=0x%x\n",
544 target, lun, tag, esiop_cmd->cmd_tables->msg_in[0],
545 le32toh(esiop_cmd->cmd_tables->status));
546 else
547 printf(" current T/L/Q invalid\n");
548 need_reset = 1;
551 if (istat & ISTAT_SIP) {
552 if (istat & ISTAT_DIP)
553 delay(10);
555 * Can't read sist0 & sist1 independently, or we have to
556 * insert a delay
558 sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
559 SIOP_SIST0);
560 sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
561 SIOP_SSTAT1);
562 #ifdef SIOP_DEBUG_INTR
563 printf("scsi interrupt, sist=0x%x sstat1=0x%x "
564 "DSA=0x%x DSP=0x%lx\n", sist, sstat1,
565 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
566 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
567 SIOP_DSP) -
568 sc->sc_c.sc_scriptaddr));
569 #endif
570 if (sist & SIST0_RST) {
571 esiop_handle_reset(sc);
572 /* no table to flush here */
573 return 1;
575 if (sist & SIST0_SGE) {
576 if (esiop_cmd)
577 scsipi_printaddr(xs->xs_periph);
578 else
579 printf("%s:", device_xname(sc->sc_c.sc_dev));
580 printf("scsi gross error\n");
581 if (esiop_target)
582 esiop_target->target_c.flags &= ~TARF_DT;
583 #ifdef DEBUG
584 printf("DSA=0x%x DSP=0x%lx\n",
585 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
586 SIOP_DSA),
587 (u_long)(bus_space_read_4(sc->sc_c.sc_rt,
588 sc->sc_c.sc_rh, SIOP_DSP) -
589 sc->sc_c.sc_scriptaddr));
590 printf("SDID 0x%x SCNTL3 0x%x SXFER 0x%x SCNTL4 0x%x\n",
591 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
592 SIOP_SDID),
593 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
594 SIOP_SCNTL3),
595 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
596 SIOP_SXFER),
597 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
598 SIOP_SCNTL4));
600 #endif
601 goto reset;
603 if ((sist & SIST0_MA) && need_reset == 0) {
604 if (esiop_cmd) {
605 int scratchc0;
606 dstat = bus_space_read_1(sc->sc_c.sc_rt,
607 sc->sc_c.sc_rh, SIOP_DSTAT);
609 * first restore DSA, in case we were in a S/G
610 * operation.
612 bus_space_write_4(sc->sc_c.sc_rt,
613 sc->sc_c.sc_rh,
614 SIOP_DSA, esiop_cmd->cmd_c.dsa);
615 scratchc0 = bus_space_read_1(sc->sc_c.sc_rt,
616 sc->sc_c.sc_rh, SIOP_SCRATCHC);
617 switch (sstat1 & SSTAT1_PHASE_MASK) {
618 case SSTAT1_PHASE_STATUS:
620 * previous phase may be aborted for any reason
621 * (for example, the target has less data to
622 * transfer than requested). Compute resid and
623 * just go to status; the command should
624 * terminate.
626 INCSTAT(esiop_stat_intr_shortxfer);
627 if (scratchc0 & A_f_c_data)
628 siop_ma(&esiop_cmd->cmd_c);
629 else if ((dstat & DSTAT_DFE) == 0)
630 siop_clearfifo(&sc->sc_c);
631 CALL_SCRIPT(Ent_status);
632 return 1;
633 case SSTAT1_PHASE_MSGIN:
635 * target may be ready to disconnect.
636 * Compute resid, which will be used later
637 * if a save data pointer is needed.
639 INCSTAT(esiop_stat_intr_xferdisc);
640 if (scratchc0 & A_f_c_data)
641 siop_ma(&esiop_cmd->cmd_c);
642 else if ((dstat & DSTAT_DFE) == 0)
643 siop_clearfifo(&sc->sc_c);
644 bus_space_write_1(sc->sc_c.sc_rt,
645 sc->sc_c.sc_rh, SIOP_SCRATCHC,
646 scratchc0 & ~A_f_c_data);
647 CALL_SCRIPT(Ent_msgin);
648 return 1;
650 aprint_error_dev(sc->sc_c.sc_dev,
651 "unexpected phase mismatch %d\n",
652 sstat1 & SSTAT1_PHASE_MASK);
653 } else {
654 aprint_error_dev(sc->sc_c.sc_dev,
655 "phase mismatch without command\n");
657 need_reset = 1;
659 if (sist & SIST0_PAR) {
660 /* parity error, reset */
661 if (esiop_cmd)
662 scsipi_printaddr(xs->xs_periph);
663 else
664 printf("%s:", device_xname(sc->sc_c.sc_dev));
665 printf("parity error\n");
666 if (esiop_target)
667 esiop_target->target_c.flags &= ~TARF_DT;
668 goto reset;
670 if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
672 * selection timeout, assume there's no device here.
673 * We also have to update the ring pointer ourselves.
675 slot = bus_space_read_1(sc->sc_c.sc_rt,
676 sc->sc_c.sc_rh, SIOP_SCRATCHE);
677 esiop_script_sync(sc,
678 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
679 #ifdef SIOP_DEBUG_SCHED
680 printf("sel timeout target %d, slot %d\n",
681 target, slot);
682 #endif
684 * mark this slot as free, and advance to next slot
686 esiop_script_write(sc,
687 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
688 A_f_cmd_free);
689 addr = bus_space_read_4(sc->sc_c.sc_rt,
690 sc->sc_c.sc_rh, SIOP_SCRATCHD);
691 if (slot < (A_ncmd_slots - 1)) {
692 bus_space_write_1(sc->sc_c.sc_rt,
693 sc->sc_c.sc_rh, SIOP_SCRATCHE, slot + 1);
694 addr = addr + sizeof(struct esiop_slot);
695 } else {
696 bus_space_write_1(sc->sc_c.sc_rt,
697 sc->sc_c.sc_rh, SIOP_SCRATCHE, 0);
698 addr = sc->sc_c.sc_scriptaddr +
699 sc->sc_shedoffset * sizeof(uint32_t);
701 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
702 SIOP_SCRATCHD, addr);
703 esiop_script_sync(sc,
704 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
705 if (esiop_cmd) {
706 esiop_cmd->cmd_c.status = CMDST_DONE;
707 xs->error = XS_SELTIMEOUT;
708 freetarget = 1;
709 goto end;
710 } else {
711 printf("%s: selection timeout without "
712 "command, target %d (sdid 0x%x), "
713 "slot %d\n",
714 device_xname(sc->sc_c.sc_dev), target,
715 bus_space_read_1(sc->sc_c.sc_rt,
716 sc->sc_c.sc_rh, SIOP_SDID), slot);
717 need_reset = 1;
720 if (sist & SIST0_UDC) {
722 * unexpected disconnect. Usually the target signals
723 * a fatal condition this way. Attempt to get sense.
725 if (esiop_cmd) {
726 esiop_cmd->cmd_tables->status =
727 htole32(SCSI_CHECK);
728 goto end;
730 aprint_error_dev(sc->sc_c.sc_dev,
731 "unexpected disconnect without command\n");
732 goto reset;
734 if (sist & (SIST1_SBMC << 8)) {
735 /* SCSI bus mode change */
736 if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
737 goto reset;
738 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
740 * we have a script interrupt, it will
741 * restart the script.
743 goto scintr;
746 * else we have to restart it ourselves, at the
747 * interrupted instruction.
749 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
750 SIOP_DSP,
751 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
752 SIOP_DSP) - 8);
753 return 1;
755 /* Else it's an unhandled exception (for now). */
756 aprint_error_dev(sc->sc_c.sc_dev,
757 "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
758 "DSA=0x%x DSP=0x%x\n", sist,
759 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
760 SIOP_SSTAT1),
761 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
762 (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
763 SIOP_DSP) - sc->sc_c.sc_scriptaddr));
764 if (esiop_cmd) {
765 esiop_cmd->cmd_c.status = CMDST_DONE;
766 xs->error = XS_SELTIMEOUT;
767 goto end;
769 need_reset = 1;
771 if (need_reset) {
772 reset:
773 /* fatal error, reset the bus */
774 siop_resetbus(&sc->sc_c);
775 /* no table to flush here */
776 return 1;
779 scintr:
780 if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
781 irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
782 SIOP_DSPS);
783 #ifdef SIOP_DEBUG_INTR
784 printf("script interrupt 0x%x\n", irqcode);
785 #endif
787 * no command, or an inactive command, is only valid for a
788 * reselect interrupt
790 if ((irqcode & 0x80) == 0) {
791 if (esiop_cmd == NULL) {
792 aprint_error_dev(sc->sc_c.sc_dev,
793 "script interrupt (0x%x) with invalid DSA !!!\n",
794 irqcode);
795 goto reset;
797 if (esiop_cmd->cmd_c.status != CMDST_ACTIVE) {
798 aprint_error_dev(sc->sc_c.sc_dev,
799 "command with invalid status "
800 "(IRQ code 0x%x current status %d) !\n",
801 irqcode, esiop_cmd->cmd_c.status);
802 xs = NULL;
805 switch(irqcode) {
806 case A_int_err:
807 printf("error, DSP=0x%x\n",
808 (int)(bus_space_read_4(sc->sc_c.sc_rt,
809 sc->sc_c.sc_rh, SIOP_DSP) -
810 sc->sc_c.sc_scriptaddr));
811 if (xs) {
812 xs->error = XS_SELTIMEOUT;
813 goto end;
814 } else {
815 goto reset;
817 case A_int_msgin:
819 int msgin = bus_space_read_1(sc->sc_c.sc_rt,
820 sc->sc_c.sc_rh, SIOP_SFBR);
821 if (msgin == MSG_MESSAGE_REJECT) {
822 int msg, extmsg;
823 if (esiop_cmd->cmd_tables->msg_out[0] & 0x80) {
825 * message was part of an identify +
826 * something else. Identify shouldn't
827 * have been rejected.
829 msg =
830 esiop_cmd->cmd_tables->msg_out[1];
831 extmsg =
832 esiop_cmd->cmd_tables->msg_out[3];
833 } else {
834 msg =
835 esiop_cmd->cmd_tables->msg_out[0];
836 extmsg =
837 esiop_cmd->cmd_tables->msg_out[2];
839 if (msg == MSG_MESSAGE_REJECT) {
840 /* MSG_REJECT for a MSG_REJECT! */
841 if (xs)
842 scsipi_printaddr(xs->xs_periph);
843 else
844 printf("%s: ", device_xname(
845 sc->sc_c.sc_dev));
846 printf("our reject message was "
847 "rejected\n");
848 goto reset;
850 if (msg == MSG_EXTENDED &&
851 extmsg == MSG_EXT_WDTR) {
852 /* WDTR rejected, initiate sync */
853 if ((esiop_target->target_c.flags &
854 TARF_SYNC) == 0) {
855 esiop_target->target_c.status =
856 TARST_OK;
857 siop_update_xfer_mode(&sc->sc_c,
858 target);
859 /* no table to flush here */
860 CALL_SCRIPT(Ent_msgin_ack);
861 return 1;
863 esiop_target->target_c.status =
864 TARST_SYNC_NEG;
865 siop_sdtr_msg(&esiop_cmd->cmd_c, 0,
866 sc->sc_c.st_minsync,
867 sc->sc_c.maxoff);
868 esiop_table_sync(esiop_cmd,
869 BUS_DMASYNC_PREREAD |
870 BUS_DMASYNC_PREWRITE);
871 CALL_SCRIPT(Ent_send_msgout);
872 return 1;
873 } else if (msg == MSG_EXTENDED &&
874 extmsg == MSG_EXT_SDTR) {
875 /* sync rejected */
876 esiop_target->target_c.offset = 0;
877 esiop_target->target_c.period = 0;
878 esiop_target->target_c.status =
879 TARST_OK;
880 siop_update_xfer_mode(&sc->sc_c,
881 target);
882 /* no table to flush here */
883 CALL_SCRIPT(Ent_msgin_ack);
884 return 1;
885 } else if (msg == MSG_EXTENDED &&
886 extmsg == MSG_EXT_PPR) {
887 /* PPR rejected */
888 esiop_target->target_c.offset = 0;
889 esiop_target->target_c.period = 0;
890 esiop_target->target_c.status =
891 TARST_OK;
892 siop_update_xfer_mode(&sc->sc_c,
893 target);
894 /* no table to flush here */
895 CALL_SCRIPT(Ent_msgin_ack);
896 return 1;
897 } else if (msg == MSG_SIMPLE_Q_TAG ||
898 msg == MSG_HEAD_OF_Q_TAG ||
899 msg == MSG_ORDERED_Q_TAG) {
900 if (esiop_handle_qtag_reject(
901 esiop_cmd) == -1)
902 goto reset;
903 CALL_SCRIPT(Ent_msgin_ack);
904 return 1;
906 if (xs)
907 scsipi_printaddr(xs->xs_periph);
908 else
909 printf("%s: ",
910 device_xname(sc->sc_c.sc_dev));
911 if (msg == MSG_EXTENDED) {
912 printf("scsi message reject, extended "
913 "message sent was 0x%x\n", extmsg);
914 } else {
915 printf("scsi message reject, message "
916 "sent was 0x%x\n", msg);
918 /* no table to flush here */
919 CALL_SCRIPT(Ent_msgin_ack);
920 return 1;
922 if (msgin == MSG_IGN_WIDE_RESIDUE) {
923 /* use the extmsgdata table to get the second byte */
924 esiop_cmd->cmd_tables->t_extmsgdata.count =
925 htole32(1);
926 esiop_table_sync(esiop_cmd,
927 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
928 CALL_SCRIPT(Ent_get_extmsgdata);
929 return 1;
931 if (xs)
932 scsipi_printaddr(xs->xs_periph);
933 else
934 printf("%s: ", device_xname(sc->sc_c.sc_dev));
935 printf("unhandled message 0x%x\n", msgin);
936 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
937 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
938 esiop_table_sync(esiop_cmd,
939 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
940 CALL_SCRIPT(Ent_send_msgout);
941 return 1;
943 case A_int_extmsgin:
944 #ifdef SIOP_DEBUG_INTR
945 printf("extended message: msg 0x%x len %d\n",
946 esiop_cmd->cmd_tables->msg_in[2],
947 esiop_cmd->cmd_tables->msg_in[1]);
948 #endif
949 if (esiop_cmd->cmd_tables->msg_in[1] >
950 sizeof(esiop_cmd->cmd_tables->msg_in) - 2)
951 aprint_error_dev(sc->sc_c.sc_dev,
952 "extended message too big (%d)\n",
953 esiop_cmd->cmd_tables->msg_in[1]);
954 esiop_cmd->cmd_tables->t_extmsgdata.count =
955 htole32(esiop_cmd->cmd_tables->msg_in[1] - 1);
956 esiop_table_sync(esiop_cmd,
957 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
958 CALL_SCRIPT(Ent_get_extmsgdata);
959 return 1;
960 case A_int_extmsgdata:
961 #ifdef SIOP_DEBUG_INTR
963 int i;
964 printf("extended message: 0x%x, data:",
965 esiop_cmd->cmd_tables->msg_in[2]);
966 for (i = 3; i < 2 + esiop_cmd->cmd_tables->msg_in[1];
967 i++)
968 printf(" 0x%x",
969 esiop_cmd->cmd_tables->msg_in[i]);
970 printf("\n");
972 #endif
973 if (esiop_cmd->cmd_tables->msg_in[0] ==
974 MSG_IGN_WIDE_RESIDUE) {
975 /* we got the second byte of MSG_IGN_WIDE_RESIDUE */
976 if (esiop_cmd->cmd_tables->msg_in[3] != 1)
977 printf("MSG_IGN_WIDE_RESIDUE: "
978 "bad len %d\n",
979 esiop_cmd->cmd_tables->msg_in[3]);
980 switch (siop_iwr(&esiop_cmd->cmd_c)) {
981 case SIOP_NEG_MSGOUT:
982 esiop_table_sync(esiop_cmd,
983 BUS_DMASYNC_PREREAD |
984 BUS_DMASYNC_PREWRITE);
985 CALL_SCRIPT(Ent_send_msgout);
986 return 1;
987 case SIOP_NEG_ACK:
988 CALL_SCRIPT(Ent_msgin_ack);
989 return 1;
990 default:
991 panic("invalid retval from "
992 "siop_iwr()");
994 return 1;
996 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_PPR) {
997 switch (siop_ppr_neg(&esiop_cmd->cmd_c)) {
998 case SIOP_NEG_MSGOUT:
999 esiop_update_scntl3(sc,
1000 esiop_cmd->cmd_c.siop_target);
1001 esiop_table_sync(esiop_cmd,
1002 BUS_DMASYNC_PREREAD |
1003 BUS_DMASYNC_PREWRITE);
1004 CALL_SCRIPT(Ent_send_msgout);
1005 return 1;
1006 case SIOP_NEG_ACK:
1007 esiop_update_scntl3(sc,
1008 esiop_cmd->cmd_c.siop_target);
1009 CALL_SCRIPT(Ent_msgin_ack);
1010 return 1;
1011 default:
1012 panic("invalid retval from "
1013 "siop_wdtr_neg()");
1015 return 1;
1017 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
1018 switch (siop_wdtr_neg(&esiop_cmd->cmd_c)) {
1019 case SIOP_NEG_MSGOUT:
1020 esiop_update_scntl3(sc,
1021 esiop_cmd->cmd_c.siop_target);
1022 esiop_table_sync(esiop_cmd,
1023 BUS_DMASYNC_PREREAD |
1024 BUS_DMASYNC_PREWRITE);
1025 CALL_SCRIPT(Ent_send_msgout);
1026 return 1;
1027 case SIOP_NEG_ACK:
1028 esiop_update_scntl3(sc,
1029 esiop_cmd->cmd_c.siop_target);
1030 CALL_SCRIPT(Ent_msgin_ack);
1031 return 1;
1032 default:
1033 panic("invalid retval from "
1034 "siop_wdtr_neg()");
1036 return 1;
1038 if (esiop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
1039 switch (siop_sdtr_neg(&esiop_cmd->cmd_c)) {
1040 case SIOP_NEG_MSGOUT:
1041 esiop_update_scntl3(sc,
1042 esiop_cmd->cmd_c.siop_target);
1043 esiop_table_sync(esiop_cmd,
1044 BUS_DMASYNC_PREREAD |
1045 BUS_DMASYNC_PREWRITE);
1046 CALL_SCRIPT(Ent_send_msgout);
1047 return 1;
1048 case SIOP_NEG_ACK:
1049 esiop_update_scntl3(sc,
1050 esiop_cmd->cmd_c.siop_target);
1051 CALL_SCRIPT(Ent_msgin_ack);
1052 return 1;
1053 default:
1054 panic("invalid retval from "
1055 "siop_wdtr_neg()");
1057 return 1;
1059 /* send a message reject */
1060 esiop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
1061 esiop_cmd->cmd_tables->t_msgout.count = htole32(1);
1062 esiop_table_sync(esiop_cmd,
1063 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1064 CALL_SCRIPT(Ent_send_msgout);
1065 return 1;
1066 case A_int_disc:
1067 INCSTAT(esiop_stat_intr_sdp);
1068 offset = bus_space_read_1(sc->sc_c.sc_rt,
1069 sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
1070 #ifdef SIOP_DEBUG_DR
1071 printf("disconnect offset %d\n", offset);
1072 #endif
1073 siop_sdp(&esiop_cmd->cmd_c, offset);
1074 /* we start again with no offset */
1075 ESIOP_XFER(esiop_cmd, saved_offset) =
1076 htole32(SIOP_NOOFFSET);
1077 esiop_table_sync(esiop_cmd,
1078 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1079 CALL_SCRIPT(Ent_script_sched);
1080 return 1;
1081 case A_int_resfail:
1082 printf("reselect failed\n");
1083 CALL_SCRIPT(Ent_script_sched);
1084 return 1;
1085 case A_int_done:
1086 if (xs == NULL) {
1087 printf("%s: done without command\n",
1088 device_xname(sc->sc_c.sc_dev));
1089 CALL_SCRIPT(Ent_script_sched);
1090 return 1;
1092 #ifdef SIOP_DEBUG_INTR
1093 printf("done, DSA=0x%lx target id 0x%x last msg "
1094 "in=0x%x status=0x%x\n",
1095 (u_long)esiop_cmd->cmd_c.dsa,
1096 le32toh(esiop_cmd->cmd_tables->id),
1097 esiop_cmd->cmd_tables->msg_in[0],
1098 le32toh(esiop_cmd->cmd_tables->status));
1099 #endif
1100 INCSTAT(esiop_stat_intr_done);
1101 esiop_cmd->cmd_c.status = CMDST_DONE;
1102 goto end;
1103 default:
1104 printf("unknown irqcode %x\n", irqcode);
1105 if (xs) {
1106 xs->error = XS_SELTIMEOUT;
1107 goto end;
1109 goto reset;
1111 return 1;
1113 /* We just shouldn't get there */
1114 panic("siop_intr: I shouldn't be there !");
1116 end:
1118 * restart the script now if the command completed properly.
1119 * Otherwise wait for esiop_scsicmd_end(); we may need to clean up the
1120 * queue
1122 xs->status = le32toh(esiop_cmd->cmd_tables->status);
1123 #ifdef SIOP_DEBUG_INTR
1124 printf("esiop_intr end: status %d\n", xs->status);
1125 #endif
1126 if (tag >= 0)
1127 esiop_lun->tactive[tag] = NULL;
1128 else
1129 esiop_lun->active = NULL;
1130 offset = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1131 SIOP_SCRATCHA + 1);
1133 * if we got a disconnect between the last data phase
1134 * and the status phase, offset will be 0. In this
1135 * case, cmd_tables->saved_offset will have the proper value
1136 * if it got updated by the controller
1138 if (offset == 0 &&
1139 ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1140 offset =
1141 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1143 esiop_scsicmd_end(esiop_cmd, offset);
1144 if (freetarget && esiop_target->target_c.status == TARST_PROBING)
1145 esiop_del_dev(sc, target, lun);
1146 CALL_SCRIPT(Ent_script_sched);
1147 return 1;
1150 void
1151 esiop_scsicmd_end(struct esiop_cmd *esiop_cmd, int offset)
1153 struct scsipi_xfer *xs = esiop_cmd->cmd_c.xs;
1154 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1156 siop_update_resid(&esiop_cmd->cmd_c, offset);
1158 switch(xs->status) {
1159 case SCSI_OK:
1160 xs->error = XS_NOERROR;
1161 break;
1162 case SCSI_BUSY:
1163 xs->error = XS_BUSY;
1164 break;
1165 case SCSI_CHECK:
1166 xs->error = XS_BUSY;
1167 /* remove commands in the queue and scheduler */
1168 esiop_unqueue(sc, xs->xs_periph->periph_target,
1169 xs->xs_periph->periph_lun);
1170 break;
1171 case SCSI_QUEUE_FULL:
1172 INCSTAT(esiop_stat_intr_qfull);
1173 #ifdef SIOP_DEBUG
1174 printf("%s:%d:%d: queue full (tag %d)\n",
1175 device_xname(sc->sc_c.sc_dev),
1176 xs->xs_periph->periph_target,
1177 xs->xs_periph->periph_lun, esiop_cmd->cmd_c.tag);
1178 #endif
1179 xs->error = XS_BUSY;
1180 break;
1181 case SCSI_SIOP_NOCHECK:
1183 * don't check status, xs->error is already valid
1185 break;
1186 case SCSI_SIOP_NOSTATUS:
1188 * the status byte was not updated, cmd was
1189 * aborted
1191 xs->error = XS_SELTIMEOUT;
1192 break;
1193 default:
1194 scsipi_printaddr(xs->xs_periph);
1195 printf("invalid status code %d\n", xs->status);
1196 xs->error = XS_DRIVER_STUFFUP;
1198 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1199 bus_dmamap_sync(sc->sc_c.sc_dmat,
1200 esiop_cmd->cmd_c.dmamap_data, 0,
1201 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1202 (xs->xs_control & XS_CTL_DATA_IN) ?
1203 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1204 bus_dmamap_unload(sc->sc_c.sc_dmat,
1205 esiop_cmd->cmd_c.dmamap_data);
1207 bus_dmamap_unload(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd);
1208 if ((xs->xs_control & XS_CTL_POLL) == 0)
1209 callout_stop(&xs->xs_callout);
1210 esiop_cmd->cmd_c.status = CMDST_FREE;
1211 TAILQ_INSERT_TAIL(&sc->free_list, esiop_cmd, next);
1212 #if 0
1213 if (xs->resid != 0)
1214 printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1215 #endif
1216 scsipi_done (xs);
1219 void
1220 esiop_checkdone(struct esiop_softc *sc)
1222 int target, lun, tag;
1223 struct esiop_target *esiop_target;
1224 struct esiop_lun *esiop_lun;
1225 struct esiop_cmd *esiop_cmd;
1226 uint32_t slot;
1227 int needsync = 0;
1228 int status;
1229 uint32_t sem, offset;
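/*
 * The semaphore word shared with the script carries two flags:
 * A_sem_done is set when there are new entries in the done ring (we
 * clear it below before scanning), and A_sem_start (presumably set by
 * the script when it starts a command) lets us thaw a channel that
 * esiop_start() froze for lack of scheduler slots.
 */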
1231 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1232 sem = esiop_script_read(sc, sc->sc_semoffset);
1233 esiop_script_write(sc, sc->sc_semoffset, sem & ~A_sem_done);
1234 if ((sc->sc_flags & SCF_CHAN_NOSLOT) && (sem & A_sem_start)) {
1236 * at least one command has been started,
1237 * so we should have free slots now
1239 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1240 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1242 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1244 if ((sem & A_sem_done) == 0) {
1245 /* no pending done command */
1246 return;
1249 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1250 sc->sc_done_offset, A_ndone_slots * sizeof(uint32_t),
1251 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1252 next:
1253 if (sc->sc_done_slot[sc->sc_currdoneslot] == 0) {
1254 if (needsync)
1255 bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_done_map,
1256 sc->sc_done_offset,
1257 A_ndone_slots * sizeof(uint32_t),
1258 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1259 return;
1262 needsync = 1;
1264 slot = htole32(sc->sc_done_slot[sc->sc_currdoneslot]);
1265 sc->sc_done_slot[sc->sc_currdoneslot] = 0;
1266 sc->sc_currdoneslot += 1;
1267 if (sc->sc_currdoneslot == A_ndone_slots)
1268 sc->sc_currdoneslot = 0;
1270 target = (slot & A_f_c_target) ? (slot >> 8) & 0xff : -1;
1271 lun = (slot & A_f_c_lun) ? (slot >> 16) & 0xff : -1;
1272 tag = (slot & A_f_c_tag) ? (slot >> 24) & 0xff : -1;
1274 esiop_target = (target >= 0) ?
1275 (struct esiop_target *)sc->sc_c.targets[target] : NULL;
1276 if (esiop_target == NULL) {
1277 printf("esiop_target (target %d) not valid\n", target);
1278 goto next;
1280 esiop_lun = (lun >= 0) ? esiop_target->esiop_lun[lun] : NULL;
1281 if (esiop_lun == NULL) {
1282 printf("esiop_lun (target %d lun %d) not valid\n",
1283 target, lun);
1284 goto next;
1286 esiop_cmd = (tag >= 0) ? esiop_lun->tactive[tag] : esiop_lun->active;
1287 if (esiop_cmd == NULL) {
1288 printf("esiop_cmd (target %d lun %d tag %d) not valid\n",
1289 target, lun, tag);
1290 goto next;
1293 esiop_table_sync(esiop_cmd,
1294 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1295 status = le32toh(esiop_cmd->cmd_tables->status);
1296 #ifdef DIAGNOSTIC
1297 if (status != SCSI_OK) {
1298 printf("command for T/L/Q %d/%d/%d status %d\n",
1299 target, lun, tag, status);
1300 goto next;
1303 #endif
1304 /* Ok, this command has been handled */
1305 esiop_cmd->cmd_c.xs->status = status;
1306 if (tag >= 0)
1307 esiop_lun->tactive[tag] = NULL;
1308 else
1309 esiop_lun->active = NULL;
1311 * scratcha may have been saved in saved_offset by the script;
1312 * fetch the offset from it
1314 offset = 0;
1315 if (ESIOP_XFER(esiop_cmd, saved_offset) != htole32(SIOP_NOOFFSET))
1316 offset =
1317 (le32toh(ESIOP_XFER(esiop_cmd, saved_offset)) >> 8) & 0xff;
1318 esiop_scsicmd_end(esiop_cmd, offset);
1319 goto next;
1322 void
1323 esiop_unqueue(struct esiop_softc *sc, int target, int lun)
1325 int slot, tag;
1326 uint32_t slotdsa;
1327 struct esiop_cmd *esiop_cmd;
1328 struct esiop_lun *esiop_lun =
1329 ((struct esiop_target *)sc->sc_c.targets[target])->esiop_lun[lun];
1331 /* first make sure to read valid data */
1332 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1334 for (tag = 0; tag < ESIOP_NTAG; tag++) {
1335 /* look for commands in the scheduler, not yet started */
1336 if (esiop_lun->tactive[tag] == NULL)
1337 continue;
1338 esiop_cmd = esiop_lun->tactive[tag];
1339 for (slot = 0; slot < A_ncmd_slots; slot++) {
1340 slotdsa = esiop_script_read(sc,
1341 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1342 /* if the slot has any flag, it won't match the DSA */
1343 if (slotdsa == esiop_cmd->cmd_c.dsa) { /* found it */
1344 /* Mark this slot as ignore */
1345 esiop_script_write(sc,
1346 sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1347 esiop_cmd->cmd_c.dsa | A_f_cmd_ignore);
1348 /* ask to requeue */
1349 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1350 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1351 esiop_lun->tactive[tag] = NULL;
1352 esiop_scsicmd_end(esiop_cmd, 0);
1353 break;
1357 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1361 * handle a rejected queue tag message: the command will run untagged,
1362 * so we have to adjust the reselect script.
1367 esiop_handle_qtag_reject(struct esiop_cmd *esiop_cmd)
1369 struct esiop_softc *sc = (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1370 int target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1371 int lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1372 int tag = esiop_cmd->cmd_tables->msg_out[2];
1373 struct esiop_target *esiop_target =
1374 (struct esiop_target*)sc->sc_c.targets[target];
1375 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
1377 #ifdef SIOP_DEBUG
1378 printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1379 device_xname(sc->sc_c.sc_dev), target, lun, tag,
1380 esiop_cmd->cmd_c.tag, esiop_cmd->cmd_c.status);
1381 #endif
1383 if (esiop_lun->active != NULL) {
1384 aprint_error_dev(sc->sc_c.sc_dev,
1385 "untagged command already running for target %d "
1386 "lun %d (status %d)\n",
1387 target, lun, esiop_lun->active->cmd_c.status);
1388 return -1;
1390 /* clear tag slot */
1391 esiop_lun->tactive[tag] = NULL;
1392 /* add command to non-tagged slot */
1393 esiop_lun->active = esiop_cmd;
1394 esiop_cmd->cmd_c.flags &= ~CMDFL_TAG;
1395 esiop_cmd->cmd_c.tag = -1;
1396 /* update DSA table */
1397 esiop_script_write(sc, esiop_target->lun_table_offset +
1398 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1399 esiop_cmd->cmd_c.dsa);
1400 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1401 return 0;
1405 * handle a bus reset: reset the chip, unqueue all active commands, free all
1406 * target structs and report lossage to the upper layer.
1407 * As the upper layer may requeue immediately, we have to first store
1408 * all active commands in a temporary queue.
1410 void
1411 esiop_handle_reset(struct esiop_softc *sc)
1413 struct esiop_cmd *esiop_cmd;
1414 struct esiop_lun *esiop_lun;
1415 int target, lun, tag;
1417 * scsi bus reset. reset the chip and restart
1418 * the queue. Need to clean up all active commands
1420 printf("%s: scsi bus reset\n", device_xname(sc->sc_c.sc_dev));
1421 /* stop, reset and restart the chip */
1422 esiop_reset(sc);
1424 if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1425 /* chip has been reset, all slots are free now */
1426 sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1427 scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1430 * Process all commands: first the commands that have completed,
1431 * then the commands being executed
1433 esiop_checkdone(sc);
1434 for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets; target++) {
1435 struct esiop_target *esiop_target =
1436 (struct esiop_target *)sc->sc_c.targets[target];
1437 if (esiop_target == NULL)
1438 continue;
1439 for (lun = 0; lun < 8; lun++) {
1440 esiop_lun = esiop_target->esiop_lun[lun];
1441 if (esiop_lun == NULL)
1442 continue;
1443 for (tag = -1; tag <
1444 ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1445 ESIOP_NTAG : 0);
1446 tag++) {
1447 if (tag >= 0)
1448 esiop_cmd = esiop_lun->tactive[tag];
1449 else
1450 esiop_cmd = esiop_lun->active;
1451 if (esiop_cmd == NULL)
1452 continue;
1453 scsipi_printaddr(
1454 esiop_cmd->cmd_c.xs->xs_periph);
1455 printf("command with tag id %d reset\n", tag);
1456 esiop_cmd->cmd_c.xs->error =
1457 (esiop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1458 XS_TIMEOUT : XS_RESET;
1459 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1460 if (tag >= 0)
1461 esiop_lun->tactive[tag] = NULL;
1462 else
1463 esiop_lun->active = NULL;
1464 esiop_cmd->cmd_c.status = CMDST_DONE;
1465 esiop_scsicmd_end(esiop_cmd, 0);
1468 sc->sc_c.targets[target]->status = TARST_ASYNC;
1469 sc->sc_c.targets[target]->flags &= ~(TARF_ISWIDE | TARF_ISDT);
1470 sc->sc_c.targets[target]->period =
1471 sc->sc_c.targets[target]->offset = 0;
1472 siop_update_xfer_mode(&sc->sc_c, target);
1475 scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1478 void
1479 esiop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1480 void *arg)
1482 struct scsipi_xfer *xs;
1483 struct scsipi_periph *periph;
1484 struct esiop_softc *sc = device_private(chan->chan_adapter->adapt_dev);
1485 struct esiop_cmd *esiop_cmd;
1486 struct esiop_target *esiop_target;
1487 int s, error, i;
1488 int target;
1489 int lun;
1491 switch (req) {
1492 case ADAPTER_REQ_RUN_XFER:
1493 xs = arg;
1494 periph = xs->xs_periph;
1495 target = periph->periph_target;
1496 lun = periph->periph_lun;
1498 s = splbio();
1500 * first check if there are pending completed commands;
1501 * this can free up some resources (in the rings, for example).
1502 * we have to lock it to avoid recursion.
1504 if ((sc->sc_flags & SCF_CHAN_ADAPTREQ) == 0) {
1505 sc->sc_flags |= SCF_CHAN_ADAPTREQ;
1506 esiop_checkdone(sc);
1507 sc->sc_flags &= ~SCF_CHAN_ADAPTREQ;
1509 #ifdef SIOP_DEBUG_SCHED
1510 printf("starting cmd for %d:%d tag %d(%d)\n", target, lun,
1511 xs->xs_tag_type, xs->xs_tag_id);
1512 #endif
1513 esiop_cmd = TAILQ_FIRST(&sc->free_list);
1514 if (esiop_cmd == NULL) {
1515 xs->error = XS_RESOURCE_SHORTAGE;
1516 scsipi_done(xs);
1517 splx(s);
1518 return;
1520 TAILQ_REMOVE(&sc->free_list, esiop_cmd, next);
1521 #ifdef DIAGNOSTIC
1522 if (esiop_cmd->cmd_c.status != CMDST_FREE)
1523 panic("siop_scsicmd: new cmd not free");
1524 #endif
1525 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1526 if (esiop_target == NULL) {
1527 #ifdef SIOP_DEBUG
1528 printf("%s: alloc siop_target for target %d\n",
1529 device_xname(sc->sc_c.sc_dev), target);
1530 #endif
1531 sc->sc_c.targets[target] =
1532 malloc(sizeof(struct esiop_target),
1533 M_DEVBUF, M_NOWAIT | M_ZERO);
1534 if (sc->sc_c.targets[target] == NULL) {
1535 aprint_error_dev(sc->sc_c.sc_dev,
1536 "can't malloc memory for "
1537 "target %d\n",
1538 target);
1539 xs->error = XS_RESOURCE_SHORTAGE;
1540 scsipi_done(xs);
1541 splx(s);
1542 return;
1544 esiop_target =
1545 (struct esiop_target*)sc->sc_c.targets[target];
1546 esiop_target->target_c.status = TARST_PROBING;
1547 esiop_target->target_c.flags = 0;
1548 esiop_target->target_c.id =
1549 sc->sc_c.clock_div << 24; /* scntl3 */
1550 esiop_target->target_c.id |= target << 16; /* id */
1551 /* esiop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1553 for (i=0; i < 8; i++)
1554 esiop_target->esiop_lun[i] = NULL;
1555 esiop_target_register(sc, target);
1557 if (esiop_target->esiop_lun[lun] == NULL) {
1558 esiop_target->esiop_lun[lun] =
1559 malloc(sizeof(struct esiop_lun), M_DEVBUF,
1560 M_NOWAIT|M_ZERO);
1561 if (esiop_target->esiop_lun[lun] == NULL) {
1562 aprint_error_dev(sc->sc_c.sc_dev,
1563 "can't alloc esiop_lun for "
1564 "target %d lun %d\n",
1565 target, lun);
1566 xs->error = XS_RESOURCE_SHORTAGE;
1567 scsipi_done(xs);
1568 splx(s);
1569 return;
1572 esiop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1573 esiop_cmd->cmd_c.xs = xs;
1574 esiop_cmd->cmd_c.flags = 0;
1575 esiop_cmd->cmd_c.status = CMDST_READY;
1577 /* load the DMA maps */
1578 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1579 esiop_cmd->cmd_c.dmamap_cmd,
1580 xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1581 if (error) {
1582 aprint_error_dev(sc->sc_c.sc_dev,
1583 "unable to load cmd DMA map: %d\n",
1584 error);
1585 xs->error = XS_DRIVER_STUFFUP;
1586 scsipi_done(xs);
1587 splx(s);
1588 return;
1590 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1591 error = bus_dmamap_load(sc->sc_c.sc_dmat,
1592 esiop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1593 NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1594 ((xs->xs_control & XS_CTL_DATA_IN) ?
1595 BUS_DMA_READ : BUS_DMA_WRITE));
1596 if (error) {
1597 aprint_error_dev(sc->sc_c.sc_dev,
1598 "unable to load cmd DMA map: %d",
1599 error);
1600 xs->error = XS_DRIVER_STUFFUP;
1601 scsipi_done(xs);
1602 bus_dmamap_unload(sc->sc_c.sc_dmat,
1603 esiop_cmd->cmd_c.dmamap_cmd);
1604 splx(s);
1605 return;
1607 bus_dmamap_sync(sc->sc_c.sc_dmat,
1608 esiop_cmd->cmd_c.dmamap_data, 0,
1609 esiop_cmd->cmd_c.dmamap_data->dm_mapsize,
1610 (xs->xs_control & XS_CTL_DATA_IN) ?
1611 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1613 bus_dmamap_sync(sc->sc_c.sc_dmat, esiop_cmd->cmd_c.dmamap_cmd,
1614 0, esiop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1615 BUS_DMASYNC_PREWRITE);
1617 if (xs->xs_tag_type)
1618 esiop_cmd->cmd_c.tag = xs->xs_tag_id;
1619 else
1620 esiop_cmd->cmd_c.tag = -1;
1621 siop_setuptables(&esiop_cmd->cmd_c);
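/*
 * Mark the saved data-pointer offset as invalid and encode target/lun/tag
 * into the tlq word, using the same layout the script reports back through
 * SCRATCHC (see the tflags decoding in esiop_intr()): target in bits 8-15,
 * lun in bits 16-23 and, for tagged commands, the tag in bits 24-31.
 */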
1622 ESIOP_XFER(esiop_cmd, saved_offset) = htole32(SIOP_NOOFFSET);
1623 ESIOP_XFER(esiop_cmd, tlq) = htole32(A_f_c_target | A_f_c_lun);
1624 ESIOP_XFER(esiop_cmd, tlq) |=
1625 htole32((target << 8) | (lun << 16));
1626 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1627 ESIOP_XFER(esiop_cmd, tlq) |= htole32(A_f_c_tag);
1628 ESIOP_XFER(esiop_cmd, tlq) |=
1629 htole32(esiop_cmd->cmd_c.tag << 24);
1632 esiop_table_sync(esiop_cmd,
1633 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1634 esiop_start(sc, esiop_cmd);
1635 if (xs->xs_control & XS_CTL_POLL) {
1636 /* poll for command completion */
1637 while ((xs->xs_status & XS_STS_DONE) == 0) {
1638 delay(1000);
1639 esiop_intr(sc);
1642 splx(s);
1643 return;
1645 case ADAPTER_REQ_GROW_RESOURCES:
1646 #ifdef SIOP_DEBUG
1647 printf("%s grow resources (%d)\n",
1648 device_xname(sc->sc_c.sc_dev),
1649 sc->sc_c.sc_adapt.adapt_openings);
1650 #endif
1651 esiop_morecbd(sc);
1652 return;
1654 case ADAPTER_REQ_SET_XFER_MODE:
1656 struct scsipi_xfer_mode *xm = arg;
1657 if (sc->sc_c.targets[xm->xm_target] == NULL)
1658 return;
1659 s = splbio();
1660 if (xm->xm_mode & PERIPH_CAP_TQING) {
1661 sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1662 /* allocate tag tables for this device */
1663 for (lun = 0;
1664 lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1665 if (scsipi_lookup_periph(chan,
1666 xm->xm_target, lun) != NULL)
1667 esiop_add_dev(sc, xm->xm_target, lun);
1670 if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1671 (sc->sc_c.features & SF_BUS_WIDE))
1672 sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1673 if (xm->xm_mode & PERIPH_CAP_SYNC)
1674 sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1675 if ((xm->xm_mode & PERIPH_CAP_DT) &&
1676 (sc->sc_c.features & SF_CHIP_DT))
1677 sc->sc_c.targets[xm->xm_target]->flags |= TARF_DT;
1678 if ((xm->xm_mode &
1679 (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_DT)) ||
1680 sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1681 sc->sc_c.targets[xm->xm_target]->status = TARST_ASYNC;
1683 splx(s);
1688 static void
1689 esiop_start(struct esiop_softc *sc, struct esiop_cmd *esiop_cmd)
1691 struct esiop_lun *esiop_lun;
1692 struct esiop_target *esiop_target;
1693 int timeout;
1694 int target, lun, slot;
1697 * first make sure to read valid data
1699 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1702 * We use a circular queue here. sc->sc_currschedslot points to a
1703 * free slot, unless we have filled the queue. Check this.
1705 slot = sc->sc_currschedslot;
1706 if ((esiop_script_read(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE) &
1707 A_f_cmd_free) == 0) {
1709 * no more free slots, no need to continue. Freeze the queue
1710 * and requeue this command.
1712 scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1713 sc->sc_flags |= SCF_CHAN_NOSLOT;
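/*
 * Clear A_sem_start in the shared semaphore; when the script (presumably)
 * sets it again after starting a command, esiop_checkdone() will notice
 * and thaw the channel.
 */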
1714 esiop_script_write(sc, sc->sc_semoffset,
1715 esiop_script_read(sc, sc->sc_semoffset) & ~A_sem_start);
1716 esiop_script_sync(sc,
1717 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1718 esiop_cmd->cmd_c.xs->error = XS_REQUEUE;
1719 esiop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1720 esiop_scsicmd_end(esiop_cmd, 0);
1721 return;
1723 /* OK, we can use this slot */
1725 target = esiop_cmd->cmd_c.xs->xs_periph->periph_target;
1726 lun = esiop_cmd->cmd_c.xs->xs_periph->periph_lun;
1727 esiop_target = (struct esiop_target*)sc->sc_c.targets[target];
1728 esiop_lun = esiop_target->esiop_lun[lun];
1729 /* if non-tagged command active, panic: this shouldn't happen */
1730 if (esiop_lun->active != NULL) {
1731 panic("esiop_start: tagged cmd while untagged running");
1733 #ifdef DIAGNOSTIC
1734 /* sanity check the tag if needed */
1735 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1736 if (esiop_cmd->cmd_c.tag >= ESIOP_NTAG ||
1737 esiop_cmd->cmd_c.tag < 0) {
1738 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1739 printf(": tag id %d\n", esiop_cmd->cmd_c.tag);
1740 panic("esiop_start: invalid tag id");
1742 if (esiop_lun->tactive[esiop_cmd->cmd_c.tag] != NULL)
1743 panic("esiop_start: tag not free");
1745 #endif
1746 #ifdef SIOP_DEBUG_SCHED
1747 printf("using slot %d for DSA 0x%lx\n", slot,
1748 (u_long)esiop_cmd->cmd_c.dsa);
1749 #endif
1750 /* mark command as active */
1751 if (esiop_cmd->cmd_c.status == CMDST_READY)
1752 esiop_cmd->cmd_c.status = CMDST_ACTIVE;
1753 else
1754 panic("esiop_start: bad status");
1755 /* DSA table for reselect */
1756 if (esiop_cmd->cmd_c.flags & CMDFL_TAG) {
1757 esiop_lun->tactive[esiop_cmd->cmd_c.tag] = esiop_cmd;
1758 /* DSA table for reselect */
1759 esiop_lun->lun_tagtbl->tbl[esiop_cmd->cmd_c.tag] =
1760 htole32(esiop_cmd->cmd_c.dsa);
1761 bus_dmamap_sync(sc->sc_c.sc_dmat,
1762 esiop_lun->lun_tagtbl->tblblk->blkmap,
1763 esiop_lun->lun_tagtbl->tbl_offset,
1764 sizeof(uint32_t) * ESIOP_NTAG, BUS_DMASYNC_PREWRITE);
1765 } else {
1766 esiop_lun->active = esiop_cmd;
1767 esiop_script_write(sc,
1768 esiop_target->lun_table_offset +
1769 lun * 2 + A_target_luntbl / sizeof(uint32_t),
1770 esiop_cmd->cmd_c.dsa);
1772 /* scheduler slot: DSA */
1773 esiop_script_write(sc, sc->sc_shedoffset + slot * CMD_SLOTSIZE,
1774 esiop_cmd->cmd_c.dsa);
1775 /* make sure SCRIPT processor will read valid data */
1776 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1777 /* handle timeout */
1778 if ((esiop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1779 /* start expire timer */
1780 timeout = mstohz(esiop_cmd->cmd_c.xs->timeout);
1781 if (timeout == 0)
1782 timeout = 1;
1783 callout_reset( &esiop_cmd->cmd_c.xs->xs_callout,
1784 timeout, esiop_timeout, esiop_cmd);
1786 /* Signal script it has some work to do */
1787 bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1788 SIOP_ISTAT, ISTAT_SIGP);
1789 /* update the current slot, and wait for IRQ */
1790 sc->sc_currschedslot++;
1791 if (sc->sc_currschedslot >= A_ncmd_slots)
1792 sc->sc_currschedslot = 0;
1795 void
1796 esiop_timeout(void *v)
1798 struct esiop_cmd *esiop_cmd = v;
1799 struct esiop_softc *sc =
1800 (struct esiop_softc *)esiop_cmd->cmd_c.siop_sc;
1801 int s;
1802 #ifdef SIOP_DEBUG
1803 int slot, slotdsa;
1804 #endif
1806 s = splbio();
1807 esiop_table_sync(esiop_cmd,
1808 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1809 scsipi_printaddr(esiop_cmd->cmd_c.xs->xs_periph);
1810 #ifdef SIOP_DEBUG
1811 printf("command timeout (status %d)\n",
1812 le32toh(esiop_cmd->cmd_tables->status));
1814 esiop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1815 for (slot = 0; slot < A_ncmd_slots; slot++) {
1816 slotdsa = esiop_script_read(sc,
1817 sc->sc_shedoffset + slot * CMD_SLOTSIZE);
1818 if ((slotdsa & 0x01) == 0)
1819 printf("slot %d not free (0x%x)\n", slot, slotdsa);
1821 printf("istat 0x%x ",
1822 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1823 printf("DSP 0x%lx DSA 0x%x\n",
1824 (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP)
1825 - sc->sc_c.sc_scriptaddr),
1826 bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
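/*
 * Editor's note: reading CTEST2 presumably clears the SIGP bit in ISTAT,
 * which is why ISTAT is printed again below to see what remains set.
 */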
1827 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_CTEST2);
1828 printf("istat 0x%x\n",
1829 bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT));
1830 #else
1831 printf("command timeout, CDB: ");
1832 scsipi_print_cdb(esiop_cmd->cmd_c.xs->cmd);
1833 printf("\n");
1834 #endif
1835 /* reset the scsi bus */
1836 siop_resetbus(&sc->sc_c);
1838 /* deactivate callout */
1839 callout_stop(&esiop_cmd->cmd_c.xs->xs_callout);
1840 /*
1841 * mark the command as having timed out and just return;
1842 * the bus reset will generate an interrupt,
1843 * it will be handled in siop_intr()
1844 */
1845 esiop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1846 splx(s);
1847 }
1849 void
1850 esiop_dump_script(struct esiop_softc *sc)
1851 {
1852 int i;
1854 for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1855 printf("0x%04x: 0x%08x 0x%08x", i * 4,
1856 le32toh(sc->sc_c.sc_script[i]),
1857 le32toh(sc->sc_c.sc_script[i + 1]));
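/*
 * Editor's note: SCRIPTS instructions are normally two 32-bit words; the
 * test below appears to catch memory-to-memory move opcodes (top bits
 * 110), which carry a third word that is printed on the same line.
 */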
1858 if ((le32toh(sc->sc_c.sc_script[i]) & 0xe0000000) ==
1859 0xc0000000) {
1860 i++;
1861 printf(" 0x%08x", le32toh(sc->sc_c.sc_script[i + 1]));
1862 }
1863 printf("\n");
1864 }
1865 }
1867 void
1868 esiop_morecbd(struct esiop_softc *sc)
1869 {
1870 int error, i, s;
1871 bus_dma_segment_t seg;
1872 int rseg;
1873 struct esiop_cbd *newcbd;
1874 struct esiop_xfer *xfer;
1875 bus_addr_t dsa;
1877 /* allocate a new list head */
1878 newcbd = malloc(sizeof(struct esiop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1879 if (newcbd == NULL) {
1880 aprint_error_dev(sc->sc_c.sc_dev,
1881 "can't allocate memory for command descriptors "
1882 "head\n");
1883 return;
1884 }
1886 /* allocate cmd list */
1887 newcbd->cmds = malloc(sizeof(struct esiop_cmd) * SIOP_NCMDPB,
1888 M_DEVBUF, M_NOWAIT|M_ZERO);
1889 if (newcbd->cmds == NULL) {
1890 aprint_error_dev(sc->sc_c.sc_dev,
1891 "can't allocate memory for command descriptors\n");
1892 goto bad3;
1893 }
1894 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
1895 &seg, 1, &rseg, BUS_DMA_NOWAIT);
1896 if (error) {
1897 aprint_error_dev(sc->sc_c.sc_dev,
1898 "unable to allocate cbd DMA memory, error = %d\n",
1899 error);
1900 goto bad2;
1901 }
1902 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1903 (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1904 if (error) {
1905 aprint_error_dev(sc->sc_c.sc_dev,
1906 "unable to map cbd DMA memory, error = %d\n",
1907 error);
1908 goto bad2;
1909 }
1910 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1911 BUS_DMA_NOWAIT, &newcbd->xferdma);
1912 if (error) {
1913 aprint_error_dev(sc->sc_c.sc_dev,
1914 "unable to create cbd DMA map, error = %d\n", error);
1915 goto bad1;
1916 }
1917 error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma,
1918 newcbd->xfers, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1919 if (error) {
1920 aprint_error_dev(sc->sc_c.sc_dev,
1921 "unable to load cbd DMA map, error = %d\n", error);
1922 goto bad0;
1923 }
1924 #ifdef DEBUG
1925 printf("%s: alloc newcbd at PHY addr 0x%lx\n",
1926 device_xname(sc->sc_c.sc_dev),
1927 (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1928 #endif
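/*
 * Editor's note: the loop below carves the freshly mapped page into
 * SIOP_NCMDPB command descriptors; each gets DMA maps for its data and
 * CDB, and its xfer table addresses are pre-computed relative to the
 * descriptor's DSA before it is put on the free list.
 */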
1929 for (i = 0; i < SIOP_NCMDPB; i++) {
1930 error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1931 MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1932 &newcbd->cmds[i].cmd_c.dmamap_data);
1933 if (error) {
1934 aprint_error_dev(sc->sc_c.sc_dev,
1935 "unable to create data DMA map for cbd: "
1936 "error %d\n", error);
1937 goto bad0;
1938 }
1939 error = bus_dmamap_create(sc->sc_c.sc_dmat,
1940 sizeof(struct scsipi_generic), 1,
1941 sizeof(struct scsipi_generic), 0,
1942 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1943 &newcbd->cmds[i].cmd_c.dmamap_cmd);
1944 if (error) {
1945 aprint_error_dev(sc->sc_c.sc_dev,
1946 "unable to create cmd DMA map for cbd %d\n", error);
1947 goto bad0;
1949 newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1950 newcbd->cmds[i].esiop_cbdp = newcbd;
1951 xfer = &newcbd->xfers[i];
1952 newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1953 memset(newcbd->cmds[i].cmd_tables, 0,
1954 sizeof(struct esiop_xfer));
1955 dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1956 i * sizeof(struct esiop_xfer);
1957 newcbd->cmds[i].cmd_c.dsa = dsa;
1958 newcbd->cmds[i].cmd_c.status = CMDST_FREE;
1959 xfer->siop_tables.t_msgout.count= htole32(1);
1960 xfer->siop_tables.t_msgout.addr = htole32(dsa);
1961 xfer->siop_tables.t_msgin.count= htole32(1);
1962 xfer->siop_tables.t_msgin.addr = htole32(dsa +
1963 offsetof(struct siop_common_xfer, msg_in));
1964 xfer->siop_tables.t_extmsgin.count= htole32(2);
1965 xfer->siop_tables.t_extmsgin.addr = htole32(dsa +
1966 offsetof(struct siop_common_xfer, msg_in) + 1);
1967 xfer->siop_tables.t_extmsgdata.addr = htole32(dsa +
1968 offsetof(struct siop_common_xfer, msg_in) + 3);
1969 xfer->siop_tables.t_status.count= htole32(1);
1970 xfer->siop_tables.t_status.addr = htole32(dsa +
1971 offsetof(struct siop_common_xfer, status));
1973 s = splbio();
1974 TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1975 splx(s);
1976 #ifdef SIOP_DEBUG
1977 printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1978 le32toh(newcbd->cmds[i].cmd_tables->t_msgin.addr),
1979 le32toh(newcbd->cmds[i].cmd_tables->t_msgout.addr),
1980 le32toh(newcbd->cmds[i].cmd_tables->t_status.addr));
1981 #endif
1982 }
1983 s = splbio();
1984 TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1985 sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1986 splx(s);
1987 return;
1988 bad0:
1989 bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1990 bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1991 bad1:
1992 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1993 bad2:
1994 free(newcbd->cmds, M_DEVBUF);
1995 bad3:
1996 free(newcbd, M_DEVBUF);
1997 }
1999 void
2000 esiop_moretagtbl(struct esiop_softc *sc)
2001 {
2002 int error, i, j, s;
2003 bus_dma_segment_t seg;
2004 int rseg;
2005 struct esiop_dsatblblk *newtblblk;
2006 struct esiop_dsatbl *newtbls;
2007 uint32_t *tbls;
2009 /* allocate a new list head */
2010 newtblblk = malloc(sizeof(struct esiop_dsatblblk),
2011 M_DEVBUF, M_NOWAIT|M_ZERO);
2012 if (newtblblk == NULL) {
2013 aprint_error_dev(sc->sc_c.sc_dev,
2014 "can't allocate memory for tag DSA table block\n");
2015 return;
2016 }
2018 /* allocate tbl list */
2019 newtbls = malloc(sizeof(struct esiop_dsatbl) * ESIOP_NTPB,
2020 M_DEVBUF, M_NOWAIT|M_ZERO);
2021 if (newtbls == NULL) {
2022 aprint_error_dev(sc->sc_c.sc_dev,
2023 "can't allocate memory for command descriptors\n");
2024 goto bad3;
2025 }
2026 error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
2027 &seg, 1, &rseg, BUS_DMA_NOWAIT);
2028 if (error) {
2029 aprint_error_dev(sc->sc_c.sc_dev,
2030 "unable to allocate tbl DMA memory, error = %d\n", error);
2031 goto bad2;
2032 }
2033 error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
2034 (void *)&tbls, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
2035 if (error) {
2036 aprint_error_dev(sc->sc_c.sc_dev,
2037 "unable to map tbls DMA memory, error = %d\n", error);
2038 goto bad2;
2039 }
2040 error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
2041 BUS_DMA_NOWAIT, &newtblblk->blkmap);
2042 if (error) {
2043 aprint_error_dev(sc->sc_c.sc_dev,
2044 "unable to create tbl DMA map, error = %d\n", error);
2045 goto bad1;
2046 }
2047 error = bus_dmamap_load(sc->sc_c.sc_dmat, newtblblk->blkmap,
2048 tbls, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
2049 if (error) {
2050 aprint_error_dev(sc->sc_c.sc_dev,
2051 "unable to load tbl DMA map, error = %d\n", error);
2052 goto bad0;
2053 }
2054 #ifdef DEBUG
2055 printf("%s: alloc new tag DSA table at PHY addr 0x%lx\n",
2056 device_xname(sc->sc_c.sc_dev),
2057 (unsigned long)newtblblk->blkmap->dm_segs[0].ds_addr);
2058 #endif
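/*
 * Editor's note: the page is split into ESIOP_NTPB tag DSA tables of
 * ESIOP_NTAG 32-bit entries each; tbl_dsa records the bus address a LUN
 * will later advertise to the SCRIPTS code for tagged reselection.
 */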
2059 for (i = 0; i < ESIOP_NTPB; i++) {
2060 newtbls[i].tblblk = newtblblk;
2061 newtbls[i].tbl = &tbls[i * ESIOP_NTAG];
2062 newtbls[i].tbl_offset = i * ESIOP_NTAG * sizeof(uint32_t);
2063 newtbls[i].tbl_dsa = newtblblk->blkmap->dm_segs[0].ds_addr +
2064 newtbls[i].tbl_offset;
2065 for (j = 0; j < ESIOP_NTAG; j++)
2066 newtbls[i].tbl[j] = j;
2067 s = splbio();
2068 TAILQ_INSERT_TAIL(&sc->free_tagtbl, &newtbls[i], next);
2069 splx(s);
2070 }
2071 s = splbio();
2072 TAILQ_INSERT_TAIL(&sc->tag_tblblk, newtblblk, next);
2073 splx(s);
2074 return;
2075 bad0:
2076 bus_dmamap_unload(sc->sc_c.sc_dmat, newtblblk->blkmap);
2077 bus_dmamap_destroy(sc->sc_c.sc_dmat, newtblblk->blkmap);
2078 bad1:
2079 bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
2080 bad2:
2081 free(newtbls, M_DEVBUF);
2082 bad3:
2083 free(newtblblk, M_DEVBUF);
2084 }
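/*
 * Editor's note: the id word written below presumably carries the scntl3
 * encoding along with the target ID, so rewriting the first LUN-table
 * entry after a sync/wide renegotiation lets the SCRIPTS select/reselect
 * path use the new transfer parameters.
 */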
2086 void
2087 esiop_update_scntl3(struct esiop_softc *sc,
2088 struct siop_common_target *_siop_target)
2089 {
2090 struct esiop_target *esiop_target = (struct esiop_target *)_siop_target;
2092 esiop_script_write(sc, esiop_target->lun_table_offset,
2093 esiop_target->target_c.id);
2094 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2095 }
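/*
 * Editor's note: esiop_add_dev() hands a LUN its tag DSA table when
 * tagged queuing is in use; if none can be obtained even after
 * esiop_moretagtbl(), TARF_TAG is cleared and the target runs untagged.
 */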
2097 void
2098 esiop_add_dev(struct esiop_softc *sc, int target, int lun)
2099 {
2100 struct esiop_target *esiop_target =
2101 (struct esiop_target *)sc->sc_c.targets[target];
2102 struct esiop_lun *esiop_lun = esiop_target->esiop_lun[lun];
2104 if (esiop_lun->lun_tagtbl != NULL)
2105 return; /* already allocated */
2107 /* we need a tag DSA table */
2108 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2109 if (esiop_lun->lun_tagtbl == NULL) {
2110 esiop_moretagtbl(sc);
2111 esiop_lun->lun_tagtbl= TAILQ_FIRST(&sc->free_tagtbl);
2112 if (esiop_lun->lun_tagtbl == NULL) {
2113 /* no resources, run untagged */
2114 esiop_target->target_c.flags &= ~TARF_TAG;
2115 return;
2116 }
2117 }
2118 TAILQ_REMOVE(&sc->free_tagtbl, esiop_lun->lun_tagtbl, next);
2119 /* Update LUN DSA table */
2120 esiop_script_write(sc, esiop_target->lun_table_offset +
2121 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2122 esiop_lun->lun_tagtbl->tbl_dsa);
2123 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2124 }
2126 void
2127 esiop_del_dev(struct esiop_softc *sc, int target, int lun)
2128 {
2129 struct esiop_target *esiop_target;
2131 #ifdef SIOP_DEBUG
2132 printf("%s:%d:%d: free lun sw entry\n",
2133 device_xname(sc->sc_c.sc_dev), target, lun);
2134 #endif
2135 if (sc->sc_c.targets[target] == NULL)
2136 return;
2137 esiop_target = (struct esiop_target *)sc->sc_c.targets[target];
2138 free(esiop_target->esiop_lun[lun], M_DEVBUF);
2139 esiop_target->esiop_lun[lun] = NULL;
2140 }
2142 void
2143 esiop_target_register(struct esiop_softc *sc, uint32_t target)
2144 {
2145 struct esiop_target *esiop_target =
2146 (struct esiop_target *)sc->sc_c.targets[target];
2147 struct esiop_lun *esiop_lun;
2148 int lun;
2150 /* get a DSA table for this target */
2151 esiop_target->lun_table_offset = sc->sc_free_offset;
2152 sc->sc_free_offset += sc->sc_c.sc_chan.chan_nluns * 2 + 2;
2153 #ifdef SIOP_DEBUG
2154 printf("%s: lun table for target %d offset %d free offset %d\n",
2155 device_xname(sc->sc_c.sc_dev), target,
2156 esiop_target->lun_table_offset,
2157 sc->sc_free_offset);
2158 #endif
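/*
 * Editor's note: the layout used below appears to be one 32-bit ID word
 * followed by two words per LUN (untagged DSA at A_target_luntbl, tag
 * table pointer at A_target_luntbl_tag), matching the
 * "chan_nluns * 2 + 2" words reserved above.
 */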
2159 /* first 32 bits are ID (for select) */
2160 esiop_script_write(sc, esiop_target->lun_table_offset,
2161 esiop_target->target_c.id);
2162 /* Record this table in the target DSA table */
2163 esiop_script_write(sc,
2164 sc->sc_target_table_offset + target,
2165 (esiop_target->lun_table_offset * sizeof(uint32_t)) +
2166 sc->sc_c.sc_scriptaddr);
2167 /* if we have a tag table, register it */
2168 for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
2169 esiop_lun = esiop_target->esiop_lun[lun];
2170 if (esiop_lun == NULL)
2171 continue;
2172 if (esiop_lun->lun_tagtbl)
2173 esiop_script_write(sc, esiop_target->lun_table_offset +
2174 lun * 2 + A_target_luntbl_tag / sizeof(uint32_t),
2175 esiop_lun->lun_tagtbl->tbl_dsa);
2176 }
2177 esiop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2178 }
2180 #ifdef SIOP_STATS
2181 void
2182 esiop_printstats(void)
2183 {
2185 printf("esiop_stat_intr %d\n", esiop_stat_intr);
2186 printf("esiop_stat_intr_shortxfer %d\n", esiop_stat_intr_shortxfer);
2187 printf("esiop_stat_intr_xferdisc %d\n", esiop_stat_intr_xferdisc);
2188 printf("esiop_stat_intr_sdp %d\n", esiop_stat_intr_sdp);
2189 printf("esiop_stat_intr_done %d\n", esiop_stat_intr_done);
2190 printf("esiop_stat_intr_lunresel %d\n", esiop_stat_intr_lunresel);
2191 printf("esiop_stat_intr_qfull %d\n", esiop_stat_intr_qfull);
2192 }
2193 #endif