/*	$NetBSD: siop_common.c,v 1.51 2009/09/04 18:29:52 tsutsui Exp $	*/

/*
 * Copyright (c) 2000, 2002 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* SYM53c7/8xx PCI-SCSI I/O Processors driver */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.51 2009/09/04 18:29:52 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/scsiio.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/scsipi/scsiconf.h>

#include <dev/ic/siopreg.h>
#include <dev/ic/siopvar_common.h>

#include "opt_siop.h"

#undef DEBUG
#undef DEBUG_DR
#undef DEBUG_NEG
61 int
62 siop_common_attach(struct siop_common_softc *sc)
64 int error, i;
65 bus_dma_segment_t seg;
66 int rseg;
69 * Allocate DMA-safe memory for the script and map it.
71 if ((sc->features & SF_CHIP_RAM) == 0) {
72 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
73 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
74 if (error) {
75 aprint_error_dev(sc->sc_dev,
76 "unable to allocate script DMA memory, "
77 "error = %d\n", error);
78 return error;
80 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
81 (void **)&sc->sc_script,
82 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
83 if (error) {
84 aprint_error_dev(sc->sc_dev,
85 "unable to map script DMA memory, "
86 "error = %d\n", error);
87 return error;
89 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
90 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
91 if (error) {
92 aprint_error_dev(sc->sc_dev,
93 "unable to create script DMA map, "
94 "error = %d\n", error);
95 return error;
97 error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
98 sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
99 if (error) {
100 aprint_error_dev(sc->sc_dev,
101 "unable to load script DMA map, "
102 "error = %d\n", error);
103 return error;
105 sc->sc_scriptaddr =
106 sc->sc_scriptdma->dm_segs[0].ds_addr;
107 sc->ram_size = PAGE_SIZE;
110 sc->sc_adapt.adapt_dev = sc->sc_dev;
111 sc->sc_adapt.adapt_nchannels = 1;
112 sc->sc_adapt.adapt_openings = 0;
113 sc->sc_adapt.adapt_ioctl = siop_ioctl;
114 sc->sc_adapt.adapt_minphys = minphys;
116 memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
117 sc->sc_chan.chan_adapter = &sc->sc_adapt;
118 sc->sc_chan.chan_bustype = &scsi_bustype;
119 sc->sc_chan.chan_channel = 0;
120 sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
121 sc->sc_chan.chan_ntargets =
122 (sc->features & SF_BUS_WIDE) ? 16 : 8;
123 sc->sc_chan.chan_nluns = 8;
124 sc->sc_chan.chan_id =
125 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
126 if (sc->sc_chan.chan_id == 0 ||
127 sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
128 sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
130 for (i = 0; i < 16; i++)
131 sc->targets[i] = NULL;
133 /* find min/max sync period for this chip */
134 sc->st_maxsync = 0;
135 sc->dt_maxsync = 0;
136 sc->st_minsync = 255;
137 sc->dt_minsync = 255;
138 for (i = 0; i < __arraycount(scf_period); i++) {
139 if (sc->clock_period != scf_period[i].clock)
140 continue;
141 if (sc->st_maxsync < scf_period[i].period)
142 sc->st_maxsync = scf_period[i].period;
143 if (sc->st_minsync > scf_period[i].period)
144 sc->st_minsync = scf_period[i].period;
146 if (sc->st_maxsync == 255 || sc->st_minsync == 0)
147 panic("siop: can't find my sync parameters");
148 for (i = 0; i < __arraycount(dt_scf_period); i++) {
149 if (sc->clock_period != dt_scf_period[i].clock)
150 continue;
151 if (sc->dt_maxsync < dt_scf_period[i].period)
152 sc->dt_maxsync = dt_scf_period[i].period;
153 if (sc->dt_minsync > dt_scf_period[i].period)
154 sc->dt_minsync = dt_scf_period[i].period;
156 if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
157 panic("siop: can't find my sync parameters");
158 return 0;
161 void
162 siop_common_reset(struct siop_common_softc *sc)
164 u_int32_t stest1, stest3;
166 /* reset the chip */
167 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
168 delay(1000);
169 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
171 /* init registers */
172 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
173 SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
174 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
175 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
176 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
177 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
178 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
179 0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
180 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
181 0xff & ~(SIEN1_HTH | SIEN1_GEN));
182 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
183 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
184 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
185 (0xb << STIME0_SEL_SHIFT));
186 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
187 sc->sc_chan.chan_id | SCID_RRE);
188 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
189 1 << sc->sc_chan.chan_id);
190 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
191 (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
192 if (sc->features & SF_CHIP_AAIP)
193 bus_space_write_1(sc->sc_rt, sc->sc_rh,
194 SIOP_AIPCNTL1, AIPCNTL1_DIS);
196 /* enable clock doubler or quadruler if appropriate */
197 if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
198 stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
199 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
200 STEST1_DBLEN);
201 if (sc->features & SF_CHIP_QUAD) {
202 /* wait for PPL to lock */
203 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
204 SIOP_STEST4) & STEST4_LOCK) == 0)
205 delay(10);
206 } else {
207 /* data sheet says 20us - more won't hurt */
208 delay(100);
210 /* halt scsi clock, select doubler/quad, restart clock */
211 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
212 stest3 | STEST3_HSC);
213 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
214 STEST1_DBLEN | STEST1_DBLSEL);
215 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
216 } else {
217 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
220 if (sc->features & SF_CHIP_USEPCIC) {
221 stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1);
222 stest1 |= STEST1_SCLK;
223 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1);
226 if (sc->features & SF_CHIP_FIFO)
227 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
228 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
229 CTEST5_DFS);
230 if (sc->features & SF_CHIP_LED0) {
231 /* Set GPIO0 as output if software LED control is required */
232 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
233 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
235 if (sc->features & SF_BUS_ULTRA3) {
236 /* reset SCNTL4 */
237 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
239 sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
240 STEST4_MODE_MASK;
243 * initialise the RAM. Without this we may get scsi gross errors on
244 * the 1010
246 if (sc->features & SF_CHIP_RAM)
247 bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
248 0, 0, sc->ram_size / 4);
249 sc->sc_reset(sc);
252 /* prepare tables before sending a cmd */
253 void
254 siop_setuptables(struct siop_common_cmd *siop_cmd)
256 int i;
257 struct siop_common_softc *sc = siop_cmd->siop_sc;
258 struct scsipi_xfer *xs = siop_cmd->xs;
259 int target = xs->xs_periph->periph_target;
260 int lun = xs->xs_periph->periph_lun;
261 int msgoffset = 1;
263 siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
264 memset(siop_cmd->siop_tables->msg_out, 0,
265 sizeof(siop_cmd->siop_tables->msg_out));
266 /* request sense doesn't disconnect */
267 if (xs->xs_control & XS_CTL_REQSENSE)
268 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
269 else if ((sc->features & SF_CHIP_GEBUG) &&
270 (sc->targets[target]->flags & TARF_ISWIDE) == 0)
272 * 1010 bug: it seems that the 1010 has problems with reselect
273 * when not in wide mode (generate false SCSI gross error).
274 * The FreeBSD sym driver has comments about it but their
275 * workaround (disable SCSI gross error reporting) doesn't
276 * work with my adapter. So disable disconnect when not
277 * wide.
279 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
280 else
281 siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
282 if (xs->xs_tag_type != 0) {
283 if ((sc->targets[target]->flags & TARF_TAG) == 0) {
284 scsipi_printaddr(xs->xs_periph);
285 printf(": tagged command type %d id %d\n",
286 siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
287 panic("tagged command for non-tagging device");
289 siop_cmd->flags |= CMDFL_TAG;
290 siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
292 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
293 * different one
295 siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
296 msgoffset = 3;
298 siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
299 if (sc->targets[target]->status == TARST_ASYNC) {
300 if ((sc->targets[target]->flags & TARF_DT) &&
301 (sc->mode == STEST4_MODE_LVD)) {
302 sc->targets[target]->status = TARST_PPR_NEG;
303 siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
304 sc->maxoff);
305 } else if (sc->targets[target]->flags & TARF_WIDE) {
306 sc->targets[target]->status = TARST_WIDE_NEG;
307 siop_wdtr_msg(siop_cmd, msgoffset,
308 MSG_EXT_WDTR_BUS_16_BIT);
309 } else if (sc->targets[target]->flags & TARF_SYNC) {
310 sc->targets[target]->status = TARST_SYNC_NEG;
311 siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
312 (sc->maxoff > 31) ? 31 : sc->maxoff);
313 } else {
314 sc->targets[target]->status = TARST_OK;
315 siop_update_xfer_mode(sc, target);
318 siop_cmd->siop_tables->status =
319 siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */
321 siop_cmd->siop_tables->cmd.count =
322 siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
323 siop_cmd->siop_tables->cmd.addr =
324 siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
325 if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
326 for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
327 siop_cmd->siop_tables->data[i].count =
328 siop_htoc32(sc,
329 siop_cmd->dmamap_data->dm_segs[i].ds_len);
330 siop_cmd->siop_tables->data[i].addr =
331 siop_htoc32(sc,
332 siop_cmd->dmamap_data->dm_segs[i].ds_addr);
338 siop_wdtr_neg(struct siop_common_cmd *siop_cmd)
340 struct siop_common_softc *sc = siop_cmd->siop_sc;
341 struct siop_common_target *siop_target = siop_cmd->siop_target;
342 int target = siop_cmd->xs->xs_periph->periph_target;
343 struct siop_common_xfer *tables = siop_cmd->siop_tables;
345 if (siop_target->status == TARST_WIDE_NEG) {
346 /* we initiated wide negotiation */
347 switch (tables->msg_in[3]) {
348 case MSG_EXT_WDTR_BUS_8_BIT:
349 siop_target->flags &= ~TARF_ISWIDE;
350 sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
351 break;
352 case MSG_EXT_WDTR_BUS_16_BIT:
353 if (siop_target->flags & TARF_WIDE) {
354 siop_target->flags |= TARF_ISWIDE;
355 sc->targets[target]->id |= (SCNTL3_EWS << 24);
356 break;
358 /* FALLTHROUGH */
359 default:
361 * hum, we got more than what we can handle, shouldn't
362 * happen. Reject, and stay async
364 siop_target->flags &= ~TARF_ISWIDE;
365 siop_target->status = TARST_OK;
366 siop_target->offset = siop_target->period = 0;
367 siop_update_xfer_mode(sc, target);
368 printf("%s: rejecting invalid wide negotiation from "
369 "target %d (%d)\n", device_xname(sc->sc_dev),
370 target,
371 tables->msg_in[3]);
372 tables->t_msgout.count = siop_htoc32(sc, 1);
373 tables->msg_out[0] = MSG_MESSAGE_REJECT;
374 return SIOP_NEG_MSGOUT;
376 tables->id = siop_htoc32(sc, sc->targets[target]->id);
377 bus_space_write_1(sc->sc_rt, sc->sc_rh,
378 SIOP_SCNTL3,
379 (sc->targets[target]->id >> 24) & 0xff);
380 /* we now need to do sync */
381 if (siop_target->flags & TARF_SYNC) {
382 siop_target->status = TARST_SYNC_NEG;
383 siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
384 (sc->maxoff > 31) ? 31 : sc->maxoff);
385 return SIOP_NEG_MSGOUT;
386 } else {
387 siop_target->status = TARST_OK;
388 siop_update_xfer_mode(sc, target);
389 return SIOP_NEG_ACK;
391 } else {
392 /* target initiated wide negotiation */
393 if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
394 && (siop_target->flags & TARF_WIDE)) {
395 siop_target->flags |= TARF_ISWIDE;
396 sc->targets[target]->id |= SCNTL3_EWS << 24;
397 } else {
398 siop_target->flags &= ~TARF_ISWIDE;
399 sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
401 tables->id = siop_htoc32(sc, sc->targets[target]->id);
402 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
403 (sc->targets[target]->id >> 24) & 0xff);
405 * we did reset wide parameters, so fall back to async,
406 * but don't schedule a sync neg, target should initiate it
408 siop_target->status = TARST_OK;
409 siop_target->offset = siop_target->period = 0;
410 siop_update_xfer_mode(sc, target);
411 siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
412 MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
413 return SIOP_NEG_MSGOUT;
418 siop_ppr_neg(struct siop_common_cmd *siop_cmd)
420 struct siop_common_softc *sc = siop_cmd->siop_sc;
421 struct siop_common_target *siop_target = siop_cmd->siop_target;
422 int target = siop_cmd->xs->xs_periph->periph_target;
423 struct siop_common_xfer *tables = siop_cmd->siop_tables;
424 int sync, offset, options, scf = 0;
425 int i;
427 #ifdef DEBUG_NEG
428 printf("%s: answer on ppr negotiation:", device_xname(sc->sc_dev));
429 for (i = 0; i < 8; i++)
430 printf(" 0x%x", tables->msg_in[i]);
431 printf("\n");
432 #endif
434 if (siop_target->status == TARST_PPR_NEG) {
435 /* we initiated PPR negotiation */
436 sync = tables->msg_in[3];
437 offset = tables->msg_in[5];
438 options = tables->msg_in[7];
439 if (options != MSG_EXT_PPR_DT) {
440 /* should't happen */
441 printf("%s: ppr negotiation for target %d: "
442 "no DT option\n", device_xname(sc->sc_dev), target);
443 siop_target->status = TARST_ASYNC;
444 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
445 siop_target->offset = 0;
446 siop_target->period = 0;
447 goto reject;
450 if (offset > sc->maxoff || sync < sc->dt_minsync ||
451 sync > sc->dt_maxsync) {
452 printf("%s: ppr negotiation for target %d: "
453 "offset (%d) or sync (%d) out of range\n",
454 device_xname(sc->sc_dev), target, offset, sync);
455 /* should not happen */
456 siop_target->offset = 0;
457 siop_target->period = 0;
458 goto reject;
459 } else {
460 for (i = 0; i < __arraycount(dt_scf_period); i++) {
461 if (sc->clock_period != dt_scf_period[i].clock)
462 continue;
463 if (dt_scf_period[i].period == sync) {
464 /* ok, found it. we now are sync. */
465 siop_target->offset = offset;
466 siop_target->period = sync;
467 scf = dt_scf_period[i].scf;
468 siop_target->flags |= TARF_ISDT;
471 if ((siop_target->flags & TARF_ISDT) == 0) {
472 printf("%s: ppr negotiation for target %d: "
473 "sync (%d) incompatible with adapter\n",
474 device_xname(sc->sc_dev), target, sync);
476 * we didn't find it in our table, do async
477 * send reject msg, start SDTR/WDTR neg
479 siop_target->status = TARST_ASYNC;
480 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
481 siop_target->offset = 0;
482 siop_target->period = 0;
483 goto reject;
486 if (tables->msg_in[6] != 1) {
487 printf("%s: ppr negotiation for target %d: "
488 "transfer width (%d) incompatible with dt\n",
489 device_xname(sc->sc_dev),
490 target, tables->msg_in[6]);
491 /* DT mode can only be done with wide transfers */
492 siop_target->status = TARST_ASYNC;
493 goto reject;
495 siop_target->flags |= TARF_ISWIDE;
496 sc->targets[target]->id |= (SCNTL3_EWS << 24);
497 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
498 sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
499 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
500 sc->targets[target]->id |=
501 (siop_target->offset & SXFER_MO_MASK) << 8;
502 sc->targets[target]->id &= ~0xff;
503 sc->targets[target]->id |= SCNTL4_U3EN;
504 siop_target->status = TARST_OK;
505 siop_update_xfer_mode(sc, target);
506 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
507 (sc->targets[target]->id >> 24) & 0xff);
508 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
509 (sc->targets[target]->id >> 8) & 0xff);
510 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
511 sc->targets[target]->id & 0xff);
512 return SIOP_NEG_ACK;
513 } else {
514 /* target initiated PPR negotiation, shouldn't happen */
515 printf("%s: rejecting invalid PPR negotiation from "
516 "target %d\n", device_xname(sc->sc_dev), target);
517 reject:
518 tables->t_msgout.count = siop_htoc32(sc, 1);
519 tables->msg_out[0] = MSG_MESSAGE_REJECT;
520 return SIOP_NEG_MSGOUT;
525 siop_sdtr_neg(struct siop_common_cmd *siop_cmd)
527 struct siop_common_softc *sc = siop_cmd->siop_sc;
528 struct siop_common_target *siop_target = siop_cmd->siop_target;
529 int target = siop_cmd->xs->xs_periph->periph_target;
530 int sync, maxoffset, offset, i;
531 int send_msgout = 0;
532 struct siop_common_xfer *tables = siop_cmd->siop_tables;
534 /* limit to Ultra/2 parameters, need PPR for Ultra/3 */
535 maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;
537 sync = tables->msg_in[3];
538 offset = tables->msg_in[4];
540 if (siop_target->status == TARST_SYNC_NEG) {
541 /* we initiated sync negotiation */
542 siop_target->status = TARST_OK;
543 #ifdef DEBUG
544 printf("sdtr: sync %d offset %d\n", sync, offset);
545 #endif
546 if (offset > maxoffset || sync < sc->st_minsync ||
547 sync > sc->st_maxsync)
548 goto reject;
549 for (i = 0; i < __arraycount(scf_period); i++) {
550 if (sc->clock_period != scf_period[i].clock)
551 continue;
552 if (scf_period[i].period == sync) {
553 /* ok, found it. we now are sync. */
554 siop_target->offset = offset;
555 siop_target->period = sync;
556 sc->targets[target]->id &=
557 ~(SCNTL3_SCF_MASK << 24);
558 sc->targets[target]->id |= scf_period[i].scf
559 << (24 + SCNTL3_SCF_SHIFT);
560 if (sync < 25 && /* Ultra */
561 (sc->features & SF_BUS_ULTRA3) == 0)
562 sc->targets[target]->id |=
563 SCNTL3_ULTRA << 24;
564 else
565 sc->targets[target]->id &=
566 ~(SCNTL3_ULTRA << 24);
567 sc->targets[target]->id &=
568 ~(SXFER_MO_MASK << 8);
569 sc->targets[target]->id |=
570 (offset & SXFER_MO_MASK) << 8;
571 sc->targets[target]->id &= ~0xff; /* scntl4 */
572 goto end;
576 * we didn't find it in our table, do async and send reject
577 * msg
579 reject:
580 send_msgout = 1;
581 tables->t_msgout.count = siop_htoc32(sc, 1);
582 tables->msg_out[0] = MSG_MESSAGE_REJECT;
583 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
584 sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
585 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
586 sc->targets[target]->id &= ~0xff; /* scntl4 */
587 siop_target->offset = siop_target->period = 0;
588 } else { /* target initiated sync neg */
589 #ifdef DEBUG
590 printf("sdtr (target): sync %d offset %d\n", sync, offset);
591 #endif
592 if (offset == 0 || sync > sc->st_maxsync) { /* async */
593 goto async;
595 if (offset > maxoffset)
596 offset = maxoffset;
597 if (sync < sc->st_minsync)
598 sync = sc->st_minsync;
599 /* look for sync period */
600 for (i = 0; i < __arraycount(scf_period); i++) {
601 if (sc->clock_period != scf_period[i].clock)
602 continue;
603 if (scf_period[i].period == sync) {
604 /* ok, found it. we now are sync. */
605 siop_target->offset = offset;
606 siop_target->period = sync;
607 sc->targets[target]->id &=
608 ~(SCNTL3_SCF_MASK << 24);
609 sc->targets[target]->id |= scf_period[i].scf
610 << (24 + SCNTL3_SCF_SHIFT);
611 if (sync < 25 && /* Ultra */
612 (sc->features & SF_BUS_ULTRA3) == 0)
613 sc->targets[target]->id |=
614 SCNTL3_ULTRA << 24;
615 else
616 sc->targets[target]->id &=
617 ~(SCNTL3_ULTRA << 24);
618 sc->targets[target]->id &=
619 ~(SXFER_MO_MASK << 8);
620 sc->targets[target]->id |=
621 (offset & SXFER_MO_MASK) << 8;
622 sc->targets[target]->id &= ~0xff; /* scntl4 */
623 siop_sdtr_msg(siop_cmd, 0, sync, offset);
624 send_msgout = 1;
625 goto end;
628 async:
629 siop_target->offset = siop_target->period = 0;
630 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
631 sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
632 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
633 sc->targets[target]->id &= ~0xff; /* scntl4 */
634 siop_sdtr_msg(siop_cmd, 0, 0, 0);
635 send_msgout = 1;
637 end:
638 if (siop_target->status == TARST_OK)
639 siop_update_xfer_mode(sc, target);
640 #ifdef DEBUG
641 printf("id now 0x%x\n", sc->targets[target]->id);
642 #endif
643 tables->id = siop_htoc32(sc, sc->targets[target]->id);
644 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
645 (sc->targets[target]->id >> 24) & 0xff);
646 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
647 (sc->targets[target]->id >> 8) & 0xff);
648 if (send_msgout) {
649 return SIOP_NEG_MSGOUT;
650 } else {
651 return SIOP_NEG_ACK;
655 void
656 siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
659 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
660 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
661 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
662 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
663 siop_cmd->siop_tables->msg_out[offset + 4] = soff;
664 siop_cmd->siop_tables->t_msgout.count =
665 siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
668 void
669 siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide)
672 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
673 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
674 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
675 siop_cmd->siop_tables->msg_out[offset + 3] = wide;
676 siop_cmd->siop_tables->t_msgout.count =
677 siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
680 void
681 siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
684 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
685 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
686 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
687 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
688 siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
689 siop_cmd->siop_tables->msg_out[offset + 5] = soff;
690 siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
691 siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
692 siop_cmd->siop_tables->t_msgout.count =
693 siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
/*
 * siop_minphys:
 *
 *	Adapter minphys hook; just apply the system default clamp.
 */
void
siop_minphys(struct buf *bp)
{

	minphys(bp);
}
704 siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
705 int flag, struct proc *p)
707 struct siop_common_softc *sc;
709 sc = device_private(chan->chan_adapter->adapt_dev);
711 switch (cmd) {
712 case SCBUSIORESET:
714 * abort the script. This will trigger an interrupt, which will
715 * trigger a bus reset.
716 * We can't safely trigger the reset here as we can't access
717 * the required register while the script is running.
719 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
720 return (0);
721 default:
722 return (ENOTTY);
726 void
727 siop_ma(struct siop_common_cmd *siop_cmd)
729 int offset, dbc, sstat;
730 struct siop_common_softc *sc = siop_cmd->siop_sc;
731 scr_table_t *table; /* table with partial xfer */
734 * compute how much of the current table didn't get handled when
735 * a phase mismatch occurs
737 if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
738 == 0)
739 return; /* no valid data transfer */
741 offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
742 if (offset >= SIOP_NSG) {
743 aprint_error_dev(sc->sc_dev, "bad offset in siop_sdp (%d)\n",
744 offset);
745 return;
747 table = &siop_cmd->siop_tables->data[offset];
748 #ifdef DEBUG_DR
749 printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
750 table->count, table->addr);
751 #endif
752 dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
753 if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
754 if (sc->features & SF_CHIP_DFBC) {
755 dbc +=
756 bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
757 } else {
758 /* need to account stale data in FIFO */
759 int dfifo =
760 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
761 if (sc->features & SF_CHIP_FIFO) {
762 dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
763 SIOP_CTEST5) & CTEST5_BOMASK) << 8;
764 dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
765 } else {
766 dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
769 sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
770 if (sstat & SSTAT0_OLF)
771 dbc++;
772 if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
773 dbc++;
774 if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
775 sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
776 SIOP_SSTAT2);
777 if (sstat & SSTAT2_OLF1)
778 dbc++;
779 if ((sstat & SSTAT2_ORF1) &&
780 (sc->features & SF_CHIP_DFBC) == 0)
781 dbc++;
783 /* clear the FIFO */
784 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
785 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
786 CTEST3_CLF);
788 siop_cmd->flags |= CMDFL_RESID;
789 siop_cmd->resid = dbc;
792 void
793 siop_sdp(struct siop_common_cmd *siop_cmd, int offset)
795 struct siop_common_softc *sc = siop_cmd->siop_sc;
796 scr_table_t *table;
798 if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
799 == 0)
800 return; /* no data pointers to save */
803 * offset == SIOP_NSG may be a valid condition if we get a Save data
804 * pointer when the xfer is done. Just ignore the Save data pointer
805 * in this case
807 if (offset == SIOP_NSG)
808 return;
809 #ifdef DIAGNOSTIC
810 if (offset > SIOP_NSG) {
811 scsipi_printaddr(siop_cmd->xs->xs_periph);
812 printf(": offset %d > %d\n", offset, SIOP_NSG);
813 panic("siop_sdp: offset");
815 #endif
817 * Save data pointer. We do this by adjusting the tables to point
818 * at the begginning of the data not yet transfered.
819 * offset points to the first table with untransfered data.
823 * before doing that we decrease resid from the ammount of data which
824 * has been transfered.
826 siop_update_resid(siop_cmd, offset);
829 * First let see if we have a resid from a phase mismatch. If so,
830 * we have to adjst the table at offset to remove transfered data.
832 if (siop_cmd->flags & CMDFL_RESID) {
833 siop_cmd->flags &= ~CMDFL_RESID;
834 table = &siop_cmd->siop_tables->data[offset];
835 /* "cut" already transfered data from this table */
836 table->addr =
837 siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
838 siop_ctoh32(sc, table->count) - siop_cmd->resid);
839 table->count = siop_htoc32(sc, siop_cmd->resid);
843 * now we can remove entries which have been transfered.
844 * We just move the entries with data left at the beggining of the
845 * tables
847 memmove(&siop_cmd->siop_tables->data[0],
848 &siop_cmd->siop_tables->data[offset],
849 (SIOP_NSG - offset) * sizeof(scr_table_t));
852 void
853 siop_update_resid(struct siop_common_cmd *siop_cmd, int offset)
855 struct siop_common_softc *sc = siop_cmd->siop_sc;
856 scr_table_t *table;
857 int i;
859 if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
860 == 0)
861 return; /* no data to transfer */
864 * update resid. First account for the table entries which have
865 * been fully completed.
867 for (i = 0; i < offset; i++)
868 siop_cmd->xs->resid -=
869 siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
871 * if CMDFL_RESID is set, the last table (pointed by offset) is a
872 * partial transfers. If not, offset points to the entry folloing
873 * the last full transfer.
875 if (siop_cmd->flags & CMDFL_RESID) {
876 table = &siop_cmd->siop_tables->data[offset];
877 siop_cmd->xs->resid -=
878 siop_ctoh32(sc, table->count) - siop_cmd->resid;
883 siop_iwr(struct siop_common_cmd *siop_cmd)
885 int offset;
886 scr_table_t *table; /* table with IWR */
887 struct siop_common_softc *sc = siop_cmd->siop_sc;
889 /* handle ignore wide residue messages */
891 /* if target isn't wide, reject */
892 if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
893 siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
894 siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
895 return SIOP_NEG_MSGOUT;
897 /* get index of current command in table */
898 offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
900 * if the current table did complete, we're now pointing at the
901 * next one. Go back one if we didn't see a phase mismatch.
903 if ((siop_cmd->flags & CMDFL_RESID) == 0)
904 offset--;
905 table = &siop_cmd->siop_tables->data[offset];
907 if ((siop_cmd->flags & CMDFL_RESID) == 0) {
908 if (siop_ctoh32(sc, table->count) & 1) {
909 /* we really got the number of bytes we expected */
910 return SIOP_NEG_ACK;
911 } else {
913 * now we really had a short xfer, by one byte.
914 * handle it just as if we had a phase mistmatch
915 * (there is a resid of one for this table).
916 * Update scratcha1 to reflect the fact that
917 * this xfer isn't complete.
919 siop_cmd->flags |= CMDFL_RESID;
920 siop_cmd->resid = 1;
921 bus_space_write_1(sc->sc_rt, sc->sc_rh,
922 SIOP_SCRATCHA + 1, offset);
923 return SIOP_NEG_ACK;
925 } else {
927 * we already have a short xfer for this table; it's
928 * just one byte less than we though it was
930 siop_cmd->resid--;
931 return SIOP_NEG_ACK;
935 void
936 siop_clearfifo(struct siop_common_softc *sc)
938 int timeout = 0;
939 int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
941 #ifdef DEBUG_INTR
942 printf("DMA fifo not empty !\n");
943 #endif
944 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
945 ctest3 | CTEST3_CLF);
946 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
947 CTEST3_CLF) != 0) {
948 delay(1);
949 if (++timeout > 1000) {
950 printf("clear fifo failed\n");
951 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
952 bus_space_read_1(sc->sc_rt, sc->sc_rh,
953 SIOP_CTEST3) & ~CTEST3_CLF);
954 return;
960 siop_modechange(struct siop_common_softc *sc)
962 int retry;
963 int sist0, sist1, stest2;
965 for (retry = 0; retry < 5; retry++) {
967 * datasheet says to wait 100ms and re-read SIST1,
968 * to check that DIFFSENSE is stable.
969 * We may delay() 5 times for 100ms at interrupt time;
970 * hopefully this will not happen often.
972 delay(100000);
973 sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
974 sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
975 if (sist1 & SIEN1_SBMC)
976 continue; /* we got an irq again */
977 sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
978 STEST4_MODE_MASK;
979 stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
980 switch(sc->mode) {
981 case STEST4_MODE_DIF:
982 printf("%s: switching to differential mode\n",
983 device_xname(sc->sc_dev));
984 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
985 stest2 | STEST2_DIF);
986 break;
987 case STEST4_MODE_SE:
988 printf("%s: switching to single-ended mode\n",
989 device_xname(sc->sc_dev));
990 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
991 stest2 & ~STEST2_DIF);
992 break;
993 case STEST4_MODE_LVD:
994 printf("%s: switching to LVD mode\n",
995 device_xname(sc->sc_dev));
996 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
997 stest2 & ~STEST2_DIF);
998 break;
999 default:
1000 aprint_error_dev(sc->sc_dev, "invalid SCSI mode 0x%x\n",
1001 sc->mode);
1002 return 0;
1004 return 1;
1006 printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
1007 device_xname(sc->sc_dev));
1008 return 0;
1011 void
1012 siop_resetbus(struct siop_common_softc *sc)
1014 int scntl1;
1016 scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
1017 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
1018 scntl1 | SCNTL1_RST);
1019 /* minimum 25 us, more time won't hurt */
1020 delay(100);
1021 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
1024 void
1025 siop_update_xfer_mode(struct siop_common_softc *sc, int target)
1027 struct siop_common_target *siop_target = sc->targets[target];
1028 struct scsipi_xfer_mode xm;
1030 xm.xm_target = target;
1031 xm.xm_mode = 0;
1032 xm.xm_period = 0;
1033 xm.xm_offset = 0;
1035 if (siop_target->flags & TARF_ISWIDE)
1036 xm.xm_mode |= PERIPH_CAP_WIDE16;
1037 if (siop_target->period) {
1038 xm.xm_period = siop_target->period;
1039 xm.xm_offset = siop_target->offset;
1040 xm.xm_mode |= PERIPH_CAP_SYNC;
1042 if (siop_target->flags & TARF_TAG) {
1043 /* 1010 workaround: can't do disconnect if not wide, so can't do tag */
1044 if ((sc->features & SF_CHIP_GEBUG) == 0 ||
1045 (sc->targets[target]->flags & TARF_ISWIDE))
1046 xm.xm_mode |= PERIPH_CAP_TQING;
1049 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);