Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
[wrt350n-kernel.git] / drivers / scsi / aic7xxx / aic7xxx_core.c
blob5f46d064a6b7739c49c08965aec4a3fb63c54665
1 /*
2 * Core routines and tables shareable across OS platforms.
4 * Copyright (c) 1994-2002 Justin T. Gibbs.
5 * Copyright (c) 2000-2002 Adaptec Inc.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * substantially similar to the "NO WARRANTY" disclaimer below
16 * ("Disclaimer") and any redistribution must be conditioned upon
17 * including a substantially similar Disclaimer requirement for further
18 * binary redistribution.
19 * 3. Neither the names of the above-listed copyright holders nor the names
20 * of any contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
23 * Alternatively, this software may be distributed under the terms of the
24 * GNU General Public License ("GPL") version 2 as published by the Free
25 * Software Foundation.
27 * NO WARRANTY
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
36 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
37 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGES.
40 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
43 #ifdef __linux__
44 #include "aic7xxx_osm.h"
45 #include "aic7xxx_inline.h"
46 #include "aicasm/aicasm_insformat.h"
47 #else
48 #include <dev/aic7xxx/aic7xxx_osm.h>
49 #include <dev/aic7xxx/aic7xxx_inline.h>
50 #include <dev/aic7xxx/aicasm/aicasm_insformat.h>
51 #endif
53 /***************************** Lookup Tables **********************************/
54 char *ahc_chip_names[] =
56 "NONE",
57 "aic7770",
58 "aic7850",
59 "aic7855",
60 "aic7859",
61 "aic7860",
62 "aic7870",
63 "aic7880",
64 "aic7895",
65 "aic7895C",
66 "aic7890/91",
67 "aic7896/97",
68 "aic7892",
69 "aic7899"
71 static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
74 * Hardware error codes.
76 struct ahc_hard_error_entry {
77 uint8_t errno;
78 char *errmesg;
81 static struct ahc_hard_error_entry ahc_hard_errors[] = {
82 { ILLHADDR, "Illegal Host Access" },
83 { ILLSADDR, "Illegal Sequencer Address referrenced" },
84 { ILLOPCODE, "Illegal Opcode in sequencer program" },
85 { SQPARERR, "Sequencer Parity Error" },
86 { DPARERR, "Data-path Parity Error" },
87 { MPARERR, "Scratch or SCB Memory Parity Error" },
88 { PCIERRSTAT, "PCI Error detected" },
89 { CIOPARERR, "CIOBUS Parity Error" },
91 static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
93 static struct ahc_phase_table_entry ahc_phase_table[] =
95 { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
96 { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
97 { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
98 { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
99 { P_COMMAND, MSG_NOOP, "in Command phase" },
100 { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
101 { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
102 { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
103 { P_BUSFREE, MSG_NOOP, "while idle" },
104 { 0, MSG_NOOP, "in unknown phase" }
108 * In most cases we only wish to itterate over real phases, so
109 * exclude the last element from the count.
111 static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;
114 * Valid SCSIRATE values. (p. 3-17)
115 * Provides a mapping of tranfer periods in ns to the proper value to
116 * stick in the scsixfer reg.
118 static struct ahc_syncrate ahc_syncrates[] =
120 /* ultra2 fast/ultra period rate */
121 { 0x42, 0x000, 9, "80.0" },
122 { 0x03, 0x000, 10, "40.0" },
123 { 0x04, 0x000, 11, "33.0" },
124 { 0x05, 0x100, 12, "20.0" },
125 { 0x06, 0x110, 15, "16.0" },
126 { 0x07, 0x120, 18, "13.4" },
127 { 0x08, 0x000, 25, "10.0" },
128 { 0x19, 0x010, 31, "8.0" },
129 { 0x1a, 0x020, 37, "6.67" },
130 { 0x1b, 0x030, 43, "5.7" },
131 { 0x1c, 0x040, 50, "5.0" },
132 { 0x00, 0x050, 56, "4.4" },
133 { 0x00, 0x060, 62, "4.0" },
134 { 0x00, 0x070, 68, "3.6" },
135 { 0x00, 0x000, 0, NULL }
138 /* Our Sequencer Program */
139 #include "aic7xxx_seq.h"
141 /**************************** Function Declarations ***************************/
142 static void ahc_force_renegotiation(struct ahc_softc *ahc,
143 struct ahc_devinfo *devinfo);
144 static struct ahc_tmode_tstate*
145 ahc_alloc_tstate(struct ahc_softc *ahc,
146 u_int scsi_id, char channel);
147 #ifdef AHC_TARGET_MODE
148 static void ahc_free_tstate(struct ahc_softc *ahc,
149 u_int scsi_id, char channel, int force);
150 #endif
151 static struct ahc_syncrate*
152 ahc_devlimited_syncrate(struct ahc_softc *ahc,
153 struct ahc_initiator_tinfo *,
154 u_int *period,
155 u_int *ppr_options,
156 role_t role);
157 static void ahc_update_pending_scbs(struct ahc_softc *ahc);
158 static void ahc_fetch_devinfo(struct ahc_softc *ahc,
159 struct ahc_devinfo *devinfo);
160 static void ahc_scb_devinfo(struct ahc_softc *ahc,
161 struct ahc_devinfo *devinfo,
162 struct scb *scb);
163 static void ahc_assert_atn(struct ahc_softc *ahc);
164 static void ahc_setup_initiator_msgout(struct ahc_softc *ahc,
165 struct ahc_devinfo *devinfo,
166 struct scb *scb);
167 static void ahc_build_transfer_msg(struct ahc_softc *ahc,
168 struct ahc_devinfo *devinfo);
169 static void ahc_construct_sdtr(struct ahc_softc *ahc,
170 struct ahc_devinfo *devinfo,
171 u_int period, u_int offset);
172 static void ahc_construct_wdtr(struct ahc_softc *ahc,
173 struct ahc_devinfo *devinfo,
174 u_int bus_width);
175 static void ahc_construct_ppr(struct ahc_softc *ahc,
176 struct ahc_devinfo *devinfo,
177 u_int period, u_int offset,
178 u_int bus_width, u_int ppr_options);
179 static void ahc_clear_msg_state(struct ahc_softc *ahc);
180 static void ahc_handle_proto_violation(struct ahc_softc *ahc);
181 static void ahc_handle_message_phase(struct ahc_softc *ahc);
/*
 * Message classes passed to ahc_sent_msg() when matching a message we
 * previously queued.  1B/2B/EXT presumably denote one-byte, two-byte,
 * and extended SCSI messages respectively — TODO confirm against
 * ahc_sent_msg()'s implementation.
 */
typedef enum {
	AHCMSG_1B,
	AHCMSG_2B,
	AHCMSG_EXT
} ahc_msgtype;
187 static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
188 u_int msgval, int full);
189 static int ahc_parse_msg(struct ahc_softc *ahc,
190 struct ahc_devinfo *devinfo);
191 static int ahc_handle_msg_reject(struct ahc_softc *ahc,
192 struct ahc_devinfo *devinfo);
193 static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
194 struct ahc_devinfo *devinfo);
195 static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
196 static void ahc_handle_devreset(struct ahc_softc *ahc,
197 struct ahc_devinfo *devinfo,
198 cam_status status, char *message,
199 int verbose_level);
200 #ifdef AHC_TARGET_MODE
201 static void ahc_setup_target_msgin(struct ahc_softc *ahc,
202 struct ahc_devinfo *devinfo,
203 struct scb *scb);
204 #endif
206 static bus_dmamap_callback_t ahc_dmamap_cb;
207 static void ahc_build_free_scb_list(struct ahc_softc *ahc);
208 static int ahc_init_scbdata(struct ahc_softc *ahc);
209 static void ahc_fini_scbdata(struct ahc_softc *ahc);
210 static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
211 struct scb *prev_scb,
212 struct scb *scb);
213 static int ahc_qinfifo_count(struct ahc_softc *ahc);
214 static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
215 u_int prev, u_int scbptr);
216 static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
217 static u_int ahc_rem_wscb(struct ahc_softc *ahc,
218 u_int scbpos, u_int prev);
219 static void ahc_reset_current_bus(struct ahc_softc *ahc);
220 #ifdef AHC_DUMP_SEQ
221 static void ahc_dumpseq(struct ahc_softc *ahc);
222 #endif
223 static int ahc_loadseq(struct ahc_softc *ahc);
224 static int ahc_check_patch(struct ahc_softc *ahc,
225 struct patch **start_patch,
226 u_int start_instr, u_int *skip_addr);
227 static void ahc_download_instr(struct ahc_softc *ahc,
228 u_int instrptr, uint8_t *dconsts);
229 #ifdef AHC_TARGET_MODE
230 static void ahc_queue_lstate_event(struct ahc_softc *ahc,
231 struct ahc_tmode_lstate *lstate,
232 u_int initiator_id,
233 u_int event_type,
234 u_int event_arg);
235 static void ahc_update_scsiid(struct ahc_softc *ahc,
236 u_int targid_mask);
237 static int ahc_handle_target_cmd(struct ahc_softc *ahc,
238 struct target_cmd *cmd);
239 #endif
240 /************************* Sequencer Execution Control ************************/
242 * Restart the sequencer program from address zero
244 void
245 ahc_restart(struct ahc_softc *ahc)
248 ahc_pause(ahc);
250 /* No more pending messages. */
251 ahc_clear_msg_state(ahc);
253 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */
254 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */
255 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
256 ahc_outb(ahc, LASTPHASE, P_BUSFREE);
257 ahc_outb(ahc, SAVED_SCSIID, 0xFF);
258 ahc_outb(ahc, SAVED_LUN, 0xFF);
261 * Ensure that the sequencer's idea of TQINPOS
262 * matches our own. The sequencer increments TQINPOS
263 * only after it sees a DMA complete and a reset could
264 * occur before the increment leaving the kernel to believe
265 * the command arrived but the sequencer to not.
267 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
269 /* Always allow reselection */
270 ahc_outb(ahc, SCSISEQ,
271 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
272 if ((ahc->features & AHC_CMD_CHAN) != 0) {
273 /* Ensure that no DMA operations are in progress */
274 ahc_outb(ahc, CCSCBCNT, 0);
275 ahc_outb(ahc, CCSGCTL, 0);
276 ahc_outb(ahc, CCSCBCTL, 0);
279 * If we were in the process of DMA'ing SCB data into
280 * an SCB, replace that SCB on the free list. This prevents
281 * an SCB leak.
283 if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
284 ahc_add_curscb_to_free_list(ahc);
285 ahc_outb(ahc, SEQ_FLAGS2,
286 ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
290 * Clear any pending sequencer interrupt. It is no
291 * longer relevant since we're resetting the Program
292 * Counter.
294 ahc_outb(ahc, CLRINT, CLRSEQINT);
296 ahc_outb(ahc, MWI_RESIDUAL, 0);
297 ahc_outb(ahc, SEQCTL, ahc->seqctl);
298 ahc_outb(ahc, SEQADDR0, 0);
299 ahc_outb(ahc, SEQADDR1, 0);
301 ahc_unpause(ahc);
304 /************************* Input/Output Queues ********************************/
305 void
306 ahc_run_qoutfifo(struct ahc_softc *ahc)
308 struct scb *scb;
309 u_int scb_index;
311 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
312 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
314 scb_index = ahc->qoutfifo[ahc->qoutfifonext];
315 if ((ahc->qoutfifonext & 0x03) == 0x03) {
316 u_int modnext;
319 * Clear 32bits of QOUTFIFO at a time
320 * so that we don't clobber an incoming
321 * byte DMA to the array on architectures
322 * that only support 32bit load and store
323 * operations.
325 modnext = ahc->qoutfifonext & ~0x3;
326 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
327 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
328 ahc->shared_data_dmamap,
329 /*offset*/modnext, /*len*/4,
330 BUS_DMASYNC_PREREAD);
332 ahc->qoutfifonext++;
334 scb = ahc_lookup_scb(ahc, scb_index);
335 if (scb == NULL) {
336 printf("%s: WARNING no command for scb %d "
337 "(cmdcmplt)\nQOUTPOS = %d\n",
338 ahc_name(ahc), scb_index,
339 (ahc->qoutfifonext - 1) & 0xFF);
340 continue;
344 * Save off the residual
345 * if there is one.
347 ahc_update_residual(ahc, scb);
348 ahc_done(ahc, scb);
352 void
353 ahc_run_untagged_queues(struct ahc_softc *ahc)
355 int i;
357 for (i = 0; i < 16; i++)
358 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
361 void
362 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
364 struct scb *scb;
366 if (ahc->untagged_queue_lock != 0)
367 return;
369 if ((scb = TAILQ_FIRST(queue)) != NULL
370 && (scb->flags & SCB_ACTIVE) == 0) {
371 scb->flags |= SCB_ACTIVE;
372 ahc_queue_scb(ahc, scb);
376 /************************* Interrupt Handling *********************************/
377 void
378 ahc_handle_brkadrint(struct ahc_softc *ahc)
381 * We upset the sequencer :-(
382 * Lookup the error message
384 int i;
385 int error;
387 error = ahc_inb(ahc, ERROR);
388 for (i = 0; error != 1 && i < num_errors; i++)
389 error >>= 1;
390 printf("%s: brkadrint, %s at seqaddr = 0x%x\n",
391 ahc_name(ahc), ahc_hard_errors[i].errmesg,
392 ahc_inb(ahc, SEQADDR0) |
393 (ahc_inb(ahc, SEQADDR1) << 8));
395 ahc_dump_card_state(ahc);
397 /* Tell everyone that this HBA is no longer available */
398 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
399 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
400 CAM_NO_HBA);
402 /* Disable all interrupt sources by resetting the controller */
403 ahc_shutdown(ahc);
406 void
407 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
409 struct scb *scb;
410 struct ahc_devinfo devinfo;
412 ahc_fetch_devinfo(ahc, &devinfo);
415 * Clear the upper byte that holds SEQINT status
416 * codes and clear the SEQINT bit. We will unpause
417 * the sequencer, if appropriate, after servicing
418 * the request.
420 ahc_outb(ahc, CLRINT, CLRSEQINT);
421 switch (intstat & SEQINT_MASK) {
422 case BAD_STATUS:
424 u_int scb_index;
425 struct hardware_scb *hscb;
428 * Set the default return value to 0 (don't
429 * send sense). The sense code will change
430 * this if needed.
432 ahc_outb(ahc, RETURN_1, 0);
435 * The sequencer will notify us when a command
436 * has an error that would be of interest to
437 * the kernel. This allows us to leave the sequencer
438 * running in the common case of command completes
439 * without error. The sequencer will already have
440 * dma'd the SCB back up to us, so we can reference
441 * the in kernel copy directly.
443 scb_index = ahc_inb(ahc, SCB_TAG);
444 scb = ahc_lookup_scb(ahc, scb_index);
445 if (scb == NULL) {
446 ahc_print_devinfo(ahc, &devinfo);
447 printf("ahc_intr - referenced scb "
448 "not valid during seqint 0x%x scb(%d)\n",
449 intstat, scb_index);
450 ahc_dump_card_state(ahc);
451 panic("for safety");
452 goto unpause;
455 hscb = scb->hscb;
457 /* Don't want to clobber the original sense code */
458 if ((scb->flags & SCB_SENSE) != 0) {
460 * Clear the SCB_SENSE Flag and have
461 * the sequencer do a normal command
462 * complete.
464 scb->flags &= ~SCB_SENSE;
465 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
466 break;
468 ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
469 /* Freeze the queue until the client sees the error. */
470 ahc_freeze_devq(ahc, scb);
471 ahc_freeze_scb(scb);
472 ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
473 switch (hscb->shared_data.status.scsi_status) {
474 case SCSI_STATUS_OK:
475 printf("%s: Interrupted for staus of 0???\n",
476 ahc_name(ahc));
477 break;
478 case SCSI_STATUS_CMD_TERMINATED:
479 case SCSI_STATUS_CHECK_COND:
481 struct ahc_dma_seg *sg;
482 struct scsi_sense *sc;
483 struct ahc_initiator_tinfo *targ_info;
484 struct ahc_tmode_tstate *tstate;
485 struct ahc_transinfo *tinfo;
486 #ifdef AHC_DEBUG
487 if (ahc_debug & AHC_SHOW_SENSE) {
488 ahc_print_path(ahc, scb);
489 printf("SCB %d: requests Check Status\n",
490 scb->hscb->tag);
492 #endif
494 if (ahc_perform_autosense(scb) == 0)
495 break;
497 targ_info = ahc_fetch_transinfo(ahc,
498 devinfo.channel,
499 devinfo.our_scsiid,
500 devinfo.target,
501 &tstate);
502 tinfo = &targ_info->curr;
503 sg = scb->sg_list;
504 sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
506 * Save off the residual if there is one.
508 ahc_update_residual(ahc, scb);
509 #ifdef AHC_DEBUG
510 if (ahc_debug & AHC_SHOW_SENSE) {
511 ahc_print_path(ahc, scb);
512 printf("Sending Sense\n");
514 #endif
515 sg->addr = ahc_get_sense_bufaddr(ahc, scb);
516 sg->len = ahc_get_sense_bufsize(ahc, scb);
517 sg->len |= AHC_DMA_LAST_SEG;
519 /* Fixup byte order */
520 sg->addr = ahc_htole32(sg->addr);
521 sg->len = ahc_htole32(sg->len);
523 sc->opcode = REQUEST_SENSE;
524 sc->byte2 = 0;
525 if (tinfo->protocol_version <= SCSI_REV_2
526 && SCB_GET_LUN(scb) < 8)
527 sc->byte2 = SCB_GET_LUN(scb) << 5;
528 sc->unused[0] = 0;
529 sc->unused[1] = 0;
530 sc->length = sg->len;
531 sc->control = 0;
534 * We can't allow the target to disconnect.
535 * This will be an untagged transaction and
536 * having the target disconnect will make this
537 * transaction indestinguishable from outstanding
538 * tagged transactions.
540 hscb->control = 0;
543 * This request sense could be because the
544 * the device lost power or in some other
545 * way has lost our transfer negotiations.
546 * Renegotiate if appropriate. Unit attention
547 * errors will be reported before any data
548 * phases occur.
550 if (ahc_get_residual(scb)
551 == ahc_get_transfer_length(scb)) {
552 ahc_update_neg_request(ahc, &devinfo,
553 tstate, targ_info,
554 AHC_NEG_IF_NON_ASYNC);
556 if (tstate->auto_negotiate & devinfo.target_mask) {
557 hscb->control |= MK_MESSAGE;
558 scb->flags &= ~SCB_NEGOTIATE;
559 scb->flags |= SCB_AUTO_NEGOTIATE;
561 hscb->cdb_len = sizeof(*sc);
562 hscb->dataptr = sg->addr;
563 hscb->datacnt = sg->len;
564 hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
565 hscb->sgptr = ahc_htole32(hscb->sgptr);
566 scb->sg_count = 1;
567 scb->flags |= SCB_SENSE;
568 ahc_qinfifo_requeue_tail(ahc, scb);
569 ahc_outb(ahc, RETURN_1, SEND_SENSE);
571 * Ensure we have enough time to actually
572 * retrieve the sense.
574 ahc_scb_timer_reset(scb, 5 * 1000000);
575 break;
577 default:
578 break;
580 break;
582 case NO_MATCH:
584 /* Ensure we don't leave the selection hardware on */
585 ahc_outb(ahc, SCSISEQ,
586 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
588 printf("%s:%c:%d: no active SCB for reconnecting "
589 "target - issuing BUS DEVICE RESET\n",
590 ahc_name(ahc), devinfo.channel, devinfo.target);
591 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
592 "ARG_1 == 0x%x ACCUM = 0x%x\n",
593 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
594 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
595 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
596 "SINDEX == 0x%x\n",
597 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
598 ahc_index_busy_tcl(ahc,
599 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
600 ahc_inb(ahc, SAVED_LUN))),
601 ahc_inb(ahc, SINDEX));
602 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
603 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
604 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
605 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
606 ahc_inb(ahc, SCB_CONTROL));
607 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
608 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
609 printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
610 printf("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
611 ahc_dump_card_state(ahc);
612 ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
613 ahc->msgout_len = 1;
614 ahc->msgout_index = 0;
615 ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
616 ahc_outb(ahc, MSG_OUT, HOST_MSG);
617 ahc_assert_atn(ahc);
618 break;
620 case SEND_REJECT:
622 u_int rejbyte = ahc_inb(ahc, ACCUM);
623 printf("%s:%c:%d: Warning - unknown message received from "
624 "target (0x%x). Rejecting\n",
625 ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
626 break;
628 case PROTO_VIOLATION:
630 ahc_handle_proto_violation(ahc);
631 break;
633 case IGN_WIDE_RES:
634 ahc_handle_ign_wide_residue(ahc, &devinfo);
635 break;
636 case PDATA_REINIT:
637 ahc_reinitialize_dataptrs(ahc);
638 break;
639 case BAD_PHASE:
641 u_int lastphase;
643 lastphase = ahc_inb(ahc, LASTPHASE);
644 printf("%s:%c:%d: unknown scsi bus phase %x, "
645 "lastphase = 0x%x. Attempting to continue\n",
646 ahc_name(ahc), devinfo.channel, devinfo.target,
647 lastphase, ahc_inb(ahc, SCSISIGI));
648 break;
650 case MISSED_BUSFREE:
652 u_int lastphase;
654 lastphase = ahc_inb(ahc, LASTPHASE);
655 printf("%s:%c:%d: Missed busfree. "
656 "Lastphase = 0x%x, Curphase = 0x%x\n",
657 ahc_name(ahc), devinfo.channel, devinfo.target,
658 lastphase, ahc_inb(ahc, SCSISIGI));
659 ahc_restart(ahc);
660 return;
662 case HOST_MSG_LOOP:
665 * The sequencer has encountered a message phase
666 * that requires host assistance for completion.
667 * While handling the message phase(s), we will be
668 * notified by the sequencer after each byte is
669 * transfered so we can track bus phase changes.
671 * If this is the first time we've seen a HOST_MSG_LOOP
672 * interrupt, initialize the state of the host message
673 * loop.
675 if (ahc->msg_type == MSG_TYPE_NONE) {
676 struct scb *scb;
677 u_int scb_index;
678 u_int bus_phase;
680 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
681 if (bus_phase != P_MESGIN
682 && bus_phase != P_MESGOUT) {
683 printf("ahc_intr: HOST_MSG_LOOP bad "
684 "phase 0x%x\n",
685 bus_phase);
687 * Probably transitioned to bus free before
688 * we got here. Just punt the message.
690 ahc_clear_intstat(ahc);
691 ahc_restart(ahc);
692 return;
695 scb_index = ahc_inb(ahc, SCB_TAG);
696 scb = ahc_lookup_scb(ahc, scb_index);
697 if (devinfo.role == ROLE_INITIATOR) {
698 <<<<<<< HEAD:drivers/scsi/aic7xxx/aic7xxx_core.c
699 if (scb == NULL)
700 panic("HOST_MSG_LOOP with "
701 "invalid SCB %x\n", scb_index);
702 =======
703 if (bus_phase == P_MESGOUT) {
704 if (scb == NULL)
705 panic("HOST_MSG_LOOP with "
706 "invalid SCB %x\n",
707 scb_index);
708 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/scsi/aic7xxx/aic7xxx_core.c
710 <<<<<<< HEAD:drivers/scsi/aic7xxx/aic7xxx_core.c
711 if (bus_phase == P_MESGOUT)
712 =======
713 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/scsi/aic7xxx/aic7xxx_core.c
714 ahc_setup_initiator_msgout(ahc,
715 &devinfo,
716 scb);
717 <<<<<<< HEAD:drivers/scsi/aic7xxx/aic7xxx_core.c
718 else {
719 =======
720 } else {
721 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/scsi/aic7xxx/aic7xxx_core.c
722 ahc->msg_type =
723 MSG_TYPE_INITIATOR_MSGIN;
724 ahc->msgin_index = 0;
727 #ifdef AHC_TARGET_MODE
728 else {
729 if (bus_phase == P_MESGOUT) {
730 ahc->msg_type =
731 MSG_TYPE_TARGET_MSGOUT;
732 ahc->msgin_index = 0;
734 else
735 ahc_setup_target_msgin(ahc,
736 &devinfo,
737 scb);
739 #endif
742 ahc_handle_message_phase(ahc);
743 break;
745 case PERR_DETECTED:
748 * If we've cleared the parity error interrupt
749 * but the sequencer still believes that SCSIPERR
750 * is true, it must be that the parity error is
751 * for the currently presented byte on the bus,
752 * and we are not in a phase (data-in) where we will
753 * eventually ack this byte. Ack the byte and
754 * throw it away in the hope that the target will
755 * take us to message out to deliver the appropriate
756 * error message.
758 if ((intstat & SCSIINT) == 0
759 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
761 if ((ahc->features & AHC_DT) == 0) {
762 u_int curphase;
765 * The hardware will only let you ack bytes
766 * if the expected phase in SCSISIGO matches
767 * the current phase. Make sure this is
768 * currently the case.
770 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
771 ahc_outb(ahc, LASTPHASE, curphase);
772 ahc_outb(ahc, SCSISIGO, curphase);
774 if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
775 int wait;
778 * In a data phase. Faster to bitbucket
779 * the data than to individually ack each
780 * byte. This is also the only strategy
781 * that will work with AUTOACK enabled.
783 ahc_outb(ahc, SXFRCTL1,
784 ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
785 wait = 5000;
786 while (--wait != 0) {
787 if ((ahc_inb(ahc, SCSISIGI)
788 & (CDI|MSGI)) != 0)
789 break;
790 ahc_delay(100);
792 ahc_outb(ahc, SXFRCTL1,
793 ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
794 if (wait == 0) {
795 struct scb *scb;
796 u_int scb_index;
798 ahc_print_devinfo(ahc, &devinfo);
799 printf("Unable to clear parity error. "
800 "Resetting bus.\n");
801 scb_index = ahc_inb(ahc, SCB_TAG);
802 scb = ahc_lookup_scb(ahc, scb_index);
803 if (scb != NULL)
804 ahc_set_transaction_status(scb,
805 CAM_UNCOR_PARITY);
806 ahc_reset_channel(ahc, devinfo.channel,
807 /*init reset*/TRUE);
809 } else {
810 ahc_inb(ahc, SCSIDATL);
813 break;
815 case DATA_OVERRUN:
818 * When the sequencer detects an overrun, it
819 * places the controller in "BITBUCKET" mode
820 * and allows the target to complete its transfer.
821 * Unfortunately, none of the counters get updated
822 * when the controller is in this mode, so we have
823 * no way of knowing how large the overrun was.
825 u_int scbindex = ahc_inb(ahc, SCB_TAG);
826 u_int lastphase = ahc_inb(ahc, LASTPHASE);
827 u_int i;
829 scb = ahc_lookup_scb(ahc, scbindex);
830 for (i = 0; i < num_phases; i++) {
831 if (lastphase == ahc_phase_table[i].phase)
832 break;
834 ahc_print_path(ahc, scb);
835 printf("data overrun detected %s."
836 " Tag == 0x%x.\n",
837 ahc_phase_table[i].phasemsg,
838 scb->hscb->tag);
839 ahc_print_path(ahc, scb);
840 printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
841 ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
842 ahc_get_transfer_length(scb), scb->sg_count);
843 if (scb->sg_count > 0) {
844 for (i = 0; i < scb->sg_count; i++) {
846 printf("sg[%d] - Addr 0x%x%x : Length %d\n",
848 (ahc_le32toh(scb->sg_list[i].len) >> 24
849 & SG_HIGH_ADDR_BITS),
850 ahc_le32toh(scb->sg_list[i].addr),
851 ahc_le32toh(scb->sg_list[i].len)
852 & AHC_SG_LEN_MASK);
856 * Set this and it will take effect when the
857 * target does a command complete.
859 ahc_freeze_devq(ahc, scb);
860 if ((scb->flags & SCB_SENSE) == 0) {
861 ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
862 } else {
863 scb->flags &= ~SCB_SENSE;
864 ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
866 ahc_freeze_scb(scb);
868 if ((ahc->features & AHC_ULTRA2) != 0) {
870 * Clear the channel in case we return
871 * to data phase later.
873 ahc_outb(ahc, SXFRCTL0,
874 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
875 ahc_outb(ahc, SXFRCTL0,
876 ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
878 if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
879 u_int dscommand1;
881 /* Ensure HHADDR is 0 for future DMA operations. */
882 dscommand1 = ahc_inb(ahc, DSCOMMAND1);
883 ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
884 ahc_outb(ahc, HADDR, 0);
885 ahc_outb(ahc, DSCOMMAND1, dscommand1);
887 break;
889 case MKMSG_FAILED:
891 u_int scbindex;
893 printf("%s:%c:%d:%d: Attempt to issue message failed\n",
894 ahc_name(ahc), devinfo.channel, devinfo.target,
895 devinfo.lun);
896 scbindex = ahc_inb(ahc, SCB_TAG);
897 scb = ahc_lookup_scb(ahc, scbindex);
898 if (scb != NULL
899 && (scb->flags & SCB_RECOVERY_SCB) != 0)
901 * Ensure that we didn't put a second instance of this
902 * SCB into the QINFIFO.
904 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
905 SCB_GET_CHANNEL(ahc, scb),
906 SCB_GET_LUN(scb), scb->hscb->tag,
907 ROLE_INITIATOR, /*status*/0,
908 SEARCH_REMOVE);
909 break;
911 case NO_FREE_SCB:
913 printf("%s: No free or disconnected SCBs\n", ahc_name(ahc));
914 ahc_dump_card_state(ahc);
915 panic("for safety");
916 break;
918 case SCB_MISMATCH:
920 u_int scbptr;
922 scbptr = ahc_inb(ahc, SCBPTR);
923 printf("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n",
924 scbptr, ahc_inb(ahc, ARG_1),
925 ahc->scb_data->hscbs[scbptr].tag);
926 ahc_dump_card_state(ahc);
927 panic("for saftey");
928 break;
930 case OUT_OF_RANGE:
932 printf("%s: BTT calculation out of range\n", ahc_name(ahc));
933 printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
934 "ARG_1 == 0x%x ACCUM = 0x%x\n",
935 ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
936 ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
937 printf("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
938 "SINDEX == 0x%x\n, A == 0x%x\n",
939 ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
940 ahc_index_busy_tcl(ahc,
941 BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
942 ahc_inb(ahc, SAVED_LUN))),
943 ahc_inb(ahc, SINDEX),
944 ahc_inb(ahc, ACCUM));
945 printf("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
946 "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
947 ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
948 ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
949 ahc_inb(ahc, SCB_CONTROL));
950 printf("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
951 ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
952 ahc_dump_card_state(ahc);
953 panic("for safety");
954 break;
956 default:
957 printf("ahc_intr: seqint, "
958 "intstat == 0x%x, scsisigi = 0x%x\n",
959 intstat, ahc_inb(ahc, SCSISIGI));
960 break;
962 unpause:
964 * The sequencer is paused immediately on
965 * a SEQINT, so we should restart it when
966 * we're done.
968 ahc_unpause(ahc);
971 void
972 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
974 u_int scb_index;
975 u_int status0;
976 u_int status;
977 struct scb *scb;
978 char cur_channel;
979 char intr_channel;
981 if ((ahc->features & AHC_TWIN) != 0
982 && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
983 cur_channel = 'B';
984 else
985 cur_channel = 'A';
986 intr_channel = cur_channel;
988 if ((ahc->features & AHC_ULTRA2) != 0)
989 status0 = ahc_inb(ahc, SSTAT0) & IOERR;
990 else
991 status0 = 0;
992 status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
993 if (status == 0 && status0 == 0) {
994 if ((ahc->features & AHC_TWIN) != 0) {
995 /* Try the other channel */
996 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
997 status = ahc_inb(ahc, SSTAT1)
998 & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
999 intr_channel = (cur_channel == 'A') ? 'B' : 'A';
1001 if (status == 0) {
1002 printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
1003 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1004 ahc_unpause(ahc);
1005 return;
1009 /* Make sure the sequencer is in a safe location. */
1010 ahc_clear_critical_section(ahc);
1012 scb_index = ahc_inb(ahc, SCB_TAG);
1013 scb = ahc_lookup_scb(ahc, scb_index);
1014 if (scb != NULL
1015 && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
1016 scb = NULL;
1018 if ((ahc->features & AHC_ULTRA2) != 0
1019 && (status0 & IOERR) != 0) {
1020 int now_lvd;
1022 now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
1023 printf("%s: Transceiver State Has Changed to %s mode\n",
1024 ahc_name(ahc), now_lvd ? "LVD" : "SE");
1025 ahc_outb(ahc, CLRSINT0, CLRIOERR);
1027 * When transitioning to SE mode, the reset line
1028 * glitches, triggering an arbitration bug in some
1029 * Ultra2 controllers. This bug is cleared when we
1030 * assert the reset line. Since a reset glitch has
1031 * already occurred with this transition and a
1032 * transceiver state change is handled just like
1033 * a bus reset anyway, asserting the reset line
1034 * ourselves is safe.
1036 ahc_reset_channel(ahc, intr_channel,
1037 /*Initiate Reset*/now_lvd == 0);
1038 } else if ((status & SCSIRSTI) != 0) {
1039 printf("%s: Someone reset channel %c\n",
1040 ahc_name(ahc), intr_channel);
1041 if (intr_channel != cur_channel)
1042 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
1043 ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
1044 } else if ((status & SCSIPERR) != 0) {
1046 * Determine the bus phase and queue an appropriate message.
1047 * SCSIPERR is latched true as soon as a parity error
1048 * occurs. If the sequencer acked the transfer that
1049 * caused the parity error and the currently presented
1050 * transfer on the bus has correct parity, SCSIPERR will
1051 * be cleared by CLRSCSIPERR. Use this to determine if
1052 * we should look at the last phase the sequencer recorded,
1053 * or the current phase presented on the bus.
1055 struct ahc_devinfo devinfo;
1056 u_int mesg_out;
1057 u_int curphase;
1058 u_int errorphase;
1059 u_int lastphase;
1060 u_int scsirate;
1061 u_int i;
1062 u_int sstat2;
1063 int silent;
1065 lastphase = ahc_inb(ahc, LASTPHASE);
1066 curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
1067 sstat2 = ahc_inb(ahc, SSTAT2);
1068 ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
1070 * For all phases save DATA, the sequencer won't
1071 * automatically ack a byte that has a parity error
1072 * in it. So the only way that the current phase
1073 * could be 'data-in' is if the parity error is for
1074 * an already acked byte in the data phase. During
1075 * synchronous data-in transfers, we may actually
1076 * ack bytes before latching the current phase in
1077 * LASTPHASE, leading to the discrepancy between
1078 * curphase and lastphase.
1080 if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
1081 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
1082 errorphase = curphase;
1083 else
1084 errorphase = lastphase;
1086 for (i = 0; i < num_phases; i++) {
1087 if (errorphase == ahc_phase_table[i].phase)
1088 break;
1090 mesg_out = ahc_phase_table[i].mesg_out;
1091 silent = FALSE;
1092 if (scb != NULL) {
1093 if (SCB_IS_SILENT(scb))
1094 silent = TRUE;
1095 else
1096 ahc_print_path(ahc, scb);
1097 scb->flags |= SCB_TRANSMISSION_ERROR;
1098 } else
1099 printf("%s:%c:%d: ", ahc_name(ahc), intr_channel,
1100 SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
1101 scsirate = ahc_inb(ahc, SCSIRATE);
1102 if (silent == FALSE) {
1103 printf("parity error detected %s. "
1104 "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
1105 ahc_phase_table[i].phasemsg,
1106 ahc_inw(ahc, SEQADDR0),
1107 scsirate);
1108 if ((ahc->features & AHC_DT) != 0) {
1109 if ((sstat2 & CRCVALERR) != 0)
1110 printf("\tCRC Value Mismatch\n");
1111 if ((sstat2 & CRCENDERR) != 0)
1112 printf("\tNo terminal CRC packet "
1113 "recevied\n");
1114 if ((sstat2 & CRCREQERR) != 0)
1115 printf("\tIllegal CRC packet "
1116 "request\n");
1117 if ((sstat2 & DUAL_EDGE_ERR) != 0)
1118 printf("\tUnexpected %sDT Data Phase\n",
1119 (scsirate & SINGLE_EDGE)
1120 ? "" : "non-");
1124 if ((ahc->features & AHC_DT) != 0
1125 && (sstat2 & DUAL_EDGE_ERR) != 0) {
1127 * This error applies regardless of
1128 * data direction, so ignore the value
1129 * in the phase table.
1131 mesg_out = MSG_INITIATOR_DET_ERR;
1135 * We've set the hardware to assert ATN if we
1136 * get a parity error on "in" phases, so all we
1137 * need to do is stuff the message buffer with
1138 * the appropriate message. "In" phases have set
1139 * mesg_out to something other than MSG_NOP.
1141 if (mesg_out != MSG_NOOP) {
1142 if (ahc->msg_type != MSG_TYPE_NONE)
1143 ahc->send_msg_perror = TRUE;
1144 else
1145 ahc_outb(ahc, MSG_OUT, mesg_out);
1148 * Force a renegotiation with this target just in
1149 * case we are out of sync for some external reason
1150 * unknown (or unreported) by the target.
1152 ahc_fetch_devinfo(ahc, &devinfo);
1153 ahc_force_renegotiation(ahc, &devinfo);
1155 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1156 ahc_unpause(ahc);
1157 } else if ((status & SELTO) != 0) {
1158 u_int scbptr;
1160 /* Stop the selection */
1161 ahc_outb(ahc, SCSISEQ, 0);
1163 /* No more pending messages */
1164 ahc_clear_msg_state(ahc);
1166 /* Clear interrupt state */
1167 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1168 ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
1178 ahc_outb(ahc, CLRSINT0, CLRSELINGO);
1180 scbptr = ahc_inb(ahc, WAITING_SCBH);
1181 ahc_outb(ahc, SCBPTR, scbptr);
1182 scb_index = ahc_inb(ahc, SCB_TAG);
1184 scb = ahc_lookup_scb(ahc, scb_index);
1185 if (scb == NULL) {
1186 printf("%s: ahc_intr - referenced scb not "
1187 "valid during SELTO scb(%d, %d)\n",
1188 ahc_name(ahc), scbptr, scb_index);
1189 ahc_dump_card_state(ahc);
1190 } else {
1191 struct ahc_devinfo devinfo;
1192 #ifdef AHC_DEBUG
1193 if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
1194 ahc_print_path(ahc, scb);
1195 printf("Saw Selection Timeout for SCB 0x%x\n",
1196 scb_index);
1198 #endif
1199 ahc_scb_devinfo(ahc, &devinfo, scb);
1200 ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
1201 ahc_freeze_devq(ahc, scb);
1204 * Cancel any pending transactions on the device
1205 * now that it seems to be missing. This will
1206 * also revert us to async/narrow transfers until
1207 * we can renegotiate with the device.
1209 ahc_handle_devreset(ahc, &devinfo,
1210 CAM_SEL_TIMEOUT,
1211 "Selection Timeout",
1212 /*verbose_level*/1);
1214 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1215 ahc_restart(ahc);
1216 } else if ((status & BUSFREE) != 0
1217 && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
1218 struct ahc_devinfo devinfo;
1219 u_int lastphase;
1220 u_int saved_scsiid;
1221 u_int saved_lun;
1222 u_int target;
1223 u_int initiator_role_id;
1224 char channel;
1225 int printerror;
1228 * Clear our selection hardware as soon as possible.
1229 * We may have an entry in the waiting Q for this target,
1230 * that is affected by this busfree and we don't want to
1231 * go about selecting the target while we handle the event.
1233 ahc_outb(ahc, SCSISEQ,
1234 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
1237 * Disable busfree interrupts and clear the busfree
1238 * interrupt status. We do this here so that several
1239 * bus transactions occur prior to clearing the SCSIINT
1240 * latch. It can take a bit for the clearing to take effect.
1242 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
1243 ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
1246 * Look at what phase we were last in.
1247 * If its message out, chances are pretty good
1248 * that the busfree was in response to one of
1249 * our abort requests.
1251 lastphase = ahc_inb(ahc, LASTPHASE);
1252 saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
1253 saved_lun = ahc_inb(ahc, SAVED_LUN);
1254 target = SCSIID_TARGET(ahc, saved_scsiid);
1255 initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
1256 channel = SCSIID_CHANNEL(ahc, saved_scsiid);
1257 ahc_compile_devinfo(&devinfo, initiator_role_id,
1258 target, saved_lun, channel, ROLE_INITIATOR);
1259 printerror = 1;
1261 if (lastphase == P_MESGOUT) {
1262 u_int tag;
1264 tag = SCB_LIST_NULL;
1265 if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
1266 || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
1267 if (ahc->msgout_buf[ahc->msgout_index - 1]
1268 == MSG_ABORT_TAG)
1269 tag = scb->hscb->tag;
1270 ahc_print_path(ahc, scb);
1271 printf("SCB %d - Abort%s Completed.\n",
1272 scb->hscb->tag, tag == SCB_LIST_NULL ?
1273 "" : " Tag");
1274 ahc_abort_scbs(ahc, target, channel,
1275 saved_lun, tag,
1276 ROLE_INITIATOR,
1277 CAM_REQ_ABORTED);
1278 printerror = 0;
1279 } else if (ahc_sent_msg(ahc, AHCMSG_1B,
1280 MSG_BUS_DEV_RESET, TRUE)) {
1281 #ifdef __FreeBSD__
1283 * Don't mark the user's request for this BDR
1284 * as completing with CAM_BDR_SENT. CAM3
1285 * specifies CAM_REQ_CMP.
1287 if (scb != NULL
1288 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
1289 && ahc_match_scb(ahc, scb, target, channel,
1290 CAM_LUN_WILDCARD,
1291 SCB_LIST_NULL,
1292 ROLE_INITIATOR)) {
1293 ahc_set_transaction_status(scb, CAM_REQ_CMP);
1295 #endif
1296 ahc_compile_devinfo(&devinfo,
1297 initiator_role_id,
1298 target,
1299 CAM_LUN_WILDCARD,
1300 channel,
1301 ROLE_INITIATOR);
1302 ahc_handle_devreset(ahc, &devinfo,
1303 CAM_BDR_SENT,
1304 "Bus Device Reset",
1305 /*verbose_level*/0);
1306 printerror = 0;
1307 } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1308 MSG_EXT_PPR, FALSE)) {
1309 struct ahc_initiator_tinfo *tinfo;
1310 struct ahc_tmode_tstate *tstate;
1313 * PPR Rejected. Try non-ppr negotiation
1314 * and retry command.
1316 tinfo = ahc_fetch_transinfo(ahc,
1317 devinfo.channel,
1318 devinfo.our_scsiid,
1319 devinfo.target,
1320 &tstate);
1321 tinfo->curr.transport_version = 2;
1322 tinfo->goal.transport_version = 2;
1323 tinfo->goal.ppr_options = 0;
1324 ahc_qinfifo_requeue_tail(ahc, scb);
1325 printerror = 0;
1326 } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1327 MSG_EXT_WDTR, FALSE)) {
1329 * Negotiation Rejected. Go-narrow and
1330 * retry command.
1332 ahc_set_width(ahc, &devinfo,
1333 MSG_EXT_WDTR_BUS_8_BIT,
1334 AHC_TRANS_CUR|AHC_TRANS_GOAL,
1335 /*paused*/TRUE);
1336 ahc_qinfifo_requeue_tail(ahc, scb);
1337 printerror = 0;
1338 } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
1339 MSG_EXT_SDTR, FALSE)) {
1341 * Negotiation Rejected. Go-async and
1342 * retry command.
1344 ahc_set_syncrate(ahc, &devinfo,
1345 /*syncrate*/NULL,
1346 /*period*/0, /*offset*/0,
1347 /*ppr_options*/0,
1348 AHC_TRANS_CUR|AHC_TRANS_GOAL,
1349 /*paused*/TRUE);
1350 ahc_qinfifo_requeue_tail(ahc, scb);
1351 printerror = 0;
1354 if (printerror != 0) {
1355 u_int i;
1357 if (scb != NULL) {
1358 u_int tag;
1360 if ((scb->hscb->control & TAG_ENB) != 0)
1361 tag = scb->hscb->tag;
1362 else
1363 tag = SCB_LIST_NULL;
1364 ahc_print_path(ahc, scb);
1365 ahc_abort_scbs(ahc, target, channel,
1366 SCB_GET_LUN(scb), tag,
1367 ROLE_INITIATOR,
1368 CAM_UNEXP_BUSFREE);
1369 } else {
1371 * We had not fully identified this connection,
1372 * so we cannot abort anything.
1374 printf("%s: ", ahc_name(ahc));
1376 for (i = 0; i < num_phases; i++) {
1377 if (lastphase == ahc_phase_table[i].phase)
1378 break;
1380 if (lastphase != P_BUSFREE) {
				/*
				 * Renegotiate with this device at the
				 * next opportunity just in case this busfree
				 * is due to a negotiation mismatch with the
				 * device.
				 */
1387 ahc_force_renegotiation(ahc, &devinfo);
1389 printf("Unexpected busfree %s\n"
1390 "SEQADDR == 0x%x\n",
1391 ahc_phase_table[i].phasemsg,
1392 ahc_inb(ahc, SEQADDR0)
1393 | (ahc_inb(ahc, SEQADDR1) << 8));
1395 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1396 ahc_restart(ahc);
1397 } else {
1398 printf("%s: Missing case in ahc_handle_scsiint. status = %x\n",
1399 ahc_name(ahc), status);
1400 ahc_outb(ahc, CLRINT, CLRSCSIINT);
1405 * Force renegotiation to occur the next time we initiate
1406 * a command to the current device.
1408 static void
1409 ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
1411 struct ahc_initiator_tinfo *targ_info;
1412 struct ahc_tmode_tstate *tstate;
1414 targ_info = ahc_fetch_transinfo(ahc,
1415 devinfo->channel,
1416 devinfo->our_scsiid,
1417 devinfo->target,
1418 &tstate);
1419 ahc_update_neg_request(ahc, devinfo, tstate,
1420 targ_info, AHC_NEG_IF_NON_ASYNC);
#define AHC_MAX_STEPS 2000
/*
 * Single-step the sequencer until it is outside any of the firmware's
 * declared critical sections.  While stepping, all pausing interrupt
 * sources are masked so a new interrupt cannot wedge the sequencer
 * inside the section; the saved SIMODE0/SIMODE1 values are restored
 * when stepping ends.  Panics if AHC_MAX_STEPS is exceeded.
 */
void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int	stepping;
	int	steps;
	u_int	simode0;
	u_int	simode1;

	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	steps = 0;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct cs *cs;
		u_int	seqaddr;
		u_int	i;

		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		/*
		 * Seqaddr represents the next instruction to execute,
		 * so we are really executing the instruction just
		 * before it.
		 */
		if (seqaddr != 0)
			seqaddr -= 1;
		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
			/* Half-open at begin: begin itself is a safe stop. */
			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		if (i == ahc->num_critical_sections)
			break;

		if (steps > AHC_MAX_STEPS) {
			printf("%s: Infinite loop in critical section\n",
			       ahc_name(ahc));
			ahc_dump_card_state(ahc);
			panic("critical section loop");
		}

		steps++;
		if (stepping == FALSE) {

			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			if ((ahc->features & AHC_DT) != 0)
				/*
				 * On DT class controllers, we
				 * use the enhanced busfree logic.
				 * Unfortunately we cannot re-enable
				 * busfree detection within the
				 * current connection, so we must
				 * leave it on while single stepping.
				 */
				ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
			else
				ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
			stepping = TRUE;
		}
		if ((ahc->features & AHC_DT) != 0) {
			/* Busfree is left enabled on DT parts; ack it each step. */
			ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
		}
		/* Release the sequencer for one instruction and wait for it
		 * to pause again. */
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		while (!ahc_is_paused(ahc))
			ahc_delay(200);
	}
	if (stepping) {
		/* Restore interrupt masks and leave single-step mode. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc->seqctl);
	}
}
/*
 * Clear any pending interrupt status.
 */
void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	/*
	 * NOTE(review): a flush follows every write — presumably to force
	 * each clear to post before the next register access; confirm
	 * against the chip's interrupt-clearing requirements.
	 */
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_flush_device_writes(ahc);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	ahc_flush_device_writes(ahc);
}
1531 /**************************** Debugging Routines ******************************/
1532 #ifdef AHC_DEBUG
1533 uint32_t ahc_debug = AHC_DEBUG_OPTS;
1534 #endif
1536 void
1537 ahc_print_scb(struct scb *scb)
1539 int i;
1541 struct hardware_scb *hscb = scb->hscb;
1543 printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
1544 (void *)scb,
1545 hscb->control,
1546 hscb->scsiid,
1547 hscb->lun,
1548 hscb->cdb_len);
1549 printf("Shared Data: ");
1550 for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
1551 printf("%#02x", hscb->shared_data.cdb[i]);
1552 printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
1553 ahc_le32toh(hscb->dataptr),
1554 ahc_le32toh(hscb->datacnt),
1555 ahc_le32toh(hscb->sgptr),
1556 hscb->tag);
1557 if (scb->sg_count > 0) {
1558 for (i = 0; i < scb->sg_count; i++) {
1559 printf("sg[%d] - Addr 0x%x%x : Length %d\n",
1561 (ahc_le32toh(scb->sg_list[i].len) >> 24
1562 & SG_HIGH_ADDR_BITS),
1563 ahc_le32toh(scb->sg_list[i].addr),
1564 ahc_le32toh(scb->sg_list[i].len));
1569 /************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static struct ahc_tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct ahc_tmode_tstate *master_tstate;
	struct ahc_tmode_tstate *tstate;
	int i;

	/* Channel B ids live in the upper half of enabled_targets[]. */
	master_tstate = ahc->enabled_targets[ahc->our_id];
	if (channel == 'B') {
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target already allocated",
		      ahc_name(ahc));
	tstate = (struct ahc_tmode_tstate*)malloc(sizeof(*tstate),
						  M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		memcpy(tstate, master_tstate, sizeof(*tstate));
		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
		tstate->ultraenb = 0;
		for (i = 0; i < AHC_NUM_TARGETS; i++) {
			memset(&tstate->transinfo[i].curr, 0,
			       sizeof(tstate->transinfo[i].curr));
			memset(&tstate->transinfo[i].goal, 0,
			       sizeof(tstate->transinfo[i].goal));
		}
	} else
		memset(tstate, 0, sizeof(*tstate));
	ahc->enabled_targets[scsi_id] = tstate;
	return (tstate);
}
#ifdef AHC_TARGET_MODE
/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct ahc_tmode_tstate *tstate;

	/*
	 * Never tear down the "master" tstate for either channel unless
	 * explicitly forced — it holds our default user settings.
	 */
	if (force == FALSE
	 && ((channel == 'A' && scsi_id == ahc->our_id)
	  || (channel == 'B' && scsi_id == ahc->our_id_b)))
		return;

	if (channel == 'B')
		scsi_id += 8;

	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	ahc->enabled_targets[scsi_id] = NULL;
}
#endif
/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity of and sync settings for
 * the target.
 */
struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc,
			struct ahc_initiator_tinfo *tinfo,
			u_int *period, u_int *ppr_options, role_t role)
{
	struct	ahc_transinfo *transinfo;
	u_int	maxsync;

	/* Determine the fastest rate class the current bus mode allows. */
	if ((ahc->features & AHC_ULTRA2) != 0) {
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_DT;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
			/* Can't do DT on an SE bus */
			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	/*
	 * Never allow a value higher than our current goal
	 * period otherwise we may allow a target initiated
	 * negotiation to go above the limit as set by the
	 * user.  In the case of an initiator initiated
	 * sync negotiation, we limit based on the user
	 * setting.  This allows the system to still accept
	 * incoming negotiations even if target initiated
	 * negotiation is not performed.
	 */
	if (role == ROLE_TARGET)
		transinfo = &tinfo->user;
	else
		transinfo = &tinfo->goal;
	*ppr_options &= transinfo->ppr_options;
	if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
		/* Narrow connections cannot use DT. */
		maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2);
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	if (transinfo->period == 0) {
		/* Settings call for async transfers. */
		*period = 0;
		*ppr_options = 0;
		return (NULL);
	}
	*period = max(*period, (u_int)transinfo->period);
	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
}
/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	if ((ahc->features & AHC_DT) == 0)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Skip all DT only entries if DT is not available */
	if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA2)
		maxsync = AHC_SYNCRATE_ULTRA2;

	/* Now set the maxsync based on the card capabilities
	 * DT is already done above */
	if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
	 && maxsync < AHC_SYNCRATE_ULTRA)
		maxsync = AHC_SYNCRATE_ULTRA;
	if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
	 && maxsync < AHC_SYNCRATE_FAST)
		maxsync = AHC_SYNCRATE_FAST;

	for (syncrate = &ahc_syncrates[maxsync];
	     syncrate->rate != NULL;
	     syncrate++) {

		/*
		 * The Ultra2 table doesn't go as low
		 * as for the Fast/Ultra cards.
		 */
		if ((ahc->features & AHC_ULTRA2) != 0
		 && (syncrate->sxfr_u2 == 0))
			break;

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (syncrate == &ahc_syncrates[maxsync])
				*period = syncrate->period;

			/*
			 * At some speeds, we only support
			 * ST transfers.
			 */
			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
			break;
		}
	}

	if ((*period == 0)
	 || (syncrate->rate == NULL)
	 || ((ahc->features & AHC_ULTRA2) != 0
	  && (syncrate->sxfr_u2 == 0))) {
		/* Use asynchronous transfers. */
		*period = 0;
		syncrate = NULL;
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	return (syncrate);
}
1779 * Convert from an entry in our syncrate table to the SCSI equivalent
1780 * sync "period" factor.
1782 u_int
1783 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
1785 struct ahc_syncrate *syncrate;
1787 if ((ahc->features & AHC_ULTRA2) != 0)
1788 scsirate &= SXFR_ULTRA2;
1789 else
1790 scsirate &= SXFR;
1792 /* now set maxsync based on card capabilities */
1793 if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2)
1794 maxsync = AHC_SYNCRATE_ULTRA2;
1795 if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
1796 && maxsync < AHC_SYNCRATE_ULTRA)
1797 maxsync = AHC_SYNCRATE_ULTRA;
1798 if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
1799 && maxsync < AHC_SYNCRATE_FAST)
1800 maxsync = AHC_SYNCRATE_FAST;
1803 syncrate = &ahc_syncrates[maxsync];
1804 while (syncrate->rate != NULL) {
1806 if ((ahc->features & AHC_ULTRA2) != 0) {
1807 if (syncrate->sxfr_u2 == 0)
1808 break;
1809 else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
1810 return (syncrate->period);
1811 } else if (scsirate == (syncrate->sxfr & SXFR)) {
1812 return (syncrate->period);
1814 syncrate++;
1816 return (0); /* async */
1820 * Truncate the given synchronous offset to a value the
1821 * current adapter type and syncrate are capable of.
1823 void
1824 ahc_validate_offset(struct ahc_softc *ahc,
1825 struct ahc_initiator_tinfo *tinfo,
1826 struct ahc_syncrate *syncrate,
1827 u_int *offset, int wide, role_t role)
1829 u_int maxoffset;
1831 /* Limit offset to what we can do */
1832 if (syncrate == NULL) {
1833 maxoffset = 0;
1834 } else if ((ahc->features & AHC_ULTRA2) != 0) {
1835 maxoffset = MAX_OFFSET_ULTRA2;
1836 } else {
1837 if (wide)
1838 maxoffset = MAX_OFFSET_16BIT;
1839 else
1840 maxoffset = MAX_OFFSET_8BIT;
1842 *offset = min(*offset, maxoffset);
1843 if (tinfo != NULL) {
1844 if (role == ROLE_TARGET)
1845 *offset = min(*offset, (u_int)tinfo->user.offset);
1846 else
1847 *offset = min(*offset, (u_int)tinfo->goal.offset);
1852 * Truncate the given transfer width parameter to a value the
1853 * current adapter type is capable of.
1855 void
1856 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
1857 u_int *bus_width, role_t role)
1859 switch (*bus_width) {
1860 default:
1861 if (ahc->features & AHC_WIDE) {
1862 /* Respond Wide */
1863 *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1864 break;
1866 /* FALLTHROUGH */
1867 case MSG_EXT_WDTR_BUS_8_BIT:
1868 *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1869 break;
1871 if (tinfo != NULL) {
1872 if (role == ROLE_TARGET)
1873 *bus_width = min((u_int)tinfo->user.width, *bus_width);
1874 else
1875 *bus_width = min((u_int)tinfo->goal.width, *bus_width);
/*
 * Update the bitmask of targets for which the controller should
 * negotiate with at the next convenient opportunity.  This currently
 * means the next time we send the initial identify messages for
 * a new transaction.
 *
 * Returns non-zero when the auto-negotiation bitmask actually changed.
 */
int
ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct ahc_tmode_tstate *tstate,
		       struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
{
	u_int auto_negotiate_orig;

	auto_negotiate_orig = tstate->auto_negotiate;
	if (neg_type == AHC_NEG_ALWAYS) {
		/*
		 * Force our "current" settings to be
		 * unknown so that unless a bus reset
		 * occurs the need to renegotiate is
		 * recorded persistently.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			tinfo->curr.width = AHC_WIDTH_UNKNOWN;
		tinfo->curr.period = AHC_PERIOD_UNKNOWN;
		tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
	}
	/*
	 * Negotiate when current and goal settings disagree, or — for
	 * AHC_NEG_IF_NON_ASYNC — whenever the goal is anything other
	 * than async/narrow.
	 */
	if (tinfo->curr.period != tinfo->goal.period
	 || tinfo->curr.width != tinfo->goal.width
	 || tinfo->curr.offset != tinfo->goal.offset
	 || tinfo->curr.ppr_options != tinfo->goal.ppr_options
	 || (neg_type == AHC_NEG_IF_NON_ASYNC
	  && (tinfo->goal.offset != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
	   || tinfo->goal.ppr_options != 0)))
		tstate->auto_negotiate |= devinfo->target_mask;
	else
		tstate->auto_negotiate &= ~devinfo->target_mask;

	return (auto_negotiate_orig != tstate->auto_negotiate);
}
/*
 * Update the user/goal/curr tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;

	/* A NULL syncrate means async transfers. */
	if (syncrate == NULL) {
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	old_period = tinfo->curr.period;
	old_offset = tinfo->curr.offset;
	old_ppr	   = tinfo->curr.ppr_options;

	/* Only touch the hardware state when "current" actually changes. */
	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int	scsirate;

		update_needed++;
		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->curr.period = period;
		tinfo->curr.offset = offset;
		tinfo->curr.ppr_options = ppr_options;

		/* Let the platform layer know the settings changed. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			if (offset != 0) {
				printf("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);

	if (update_needed)
		ahc_update_pending_scbs(ahc);
}
/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	oldwidth;
	int	active;
	int	update_needed;

	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
	update_needed = 0;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;

	oldwidth = tinfo->curr.width;
	/* Only touch the hardware state when "current" actually changes. */
	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		u_int	scsirate;

		update_needed++;
		scsirate =  tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->curr.width = width;

		/* Let the platform layer know the settings changed. */
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}

	update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
						tinfo, AHC_NEG_TO_GOAL);
	if (update_needed)
		ahc_update_pending_scbs(ahc);
}
2109 * Update the current state of tagged queuing for a given target.
2111 static void
2112 ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
2113 struct ahc_devinfo *devinfo, ahc_queue_alg alg)
2115 struct scsi_device *sdev = cmd->device;
2117 ahc_platform_set_tags(ahc, sdev, devinfo, alg);
2118 ahc_send_async(ahc, devinfo->channel, devinfo->target,
2119 devinfo->lun, AC_TRANSFER_NEG);
/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 */
static void
ahc_update_pending_scbs(struct ahc_softc *ahc)
{
	struct	scb *pending_scb;
	int	pending_scb_count;
	int	i;
	int	paused;
	u_int	saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct ahc_tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		pending_hscb = pending_scb->hscb;
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->curr.offset;
		/*
		 * Drop the auto-negotiation request (and its MK_MESSAGE
		 * flag) if this target no longer needs to negotiate.
		 */
		if ((tstate->auto_negotiate & devinfo.target_mask) == 0
		 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
			pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
			pending_hscb->control &= ~MK_MESSAGE;
		}
		ahc_sync_scb(ahc, pending_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Pause the sequencer (remembering if it was already paused). */
	if (ahc_is_paused(ahc)) {
		paused = 1;
	} else {
		paused = 0;
		ahc_pause(ahc);
	}

	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct hardware_scb *pending_hscb;
		u_int control;
		u_int scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~(ULTRAENB|MK_MESSAGE);
		control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	ahc_outb(ahc, SCBPTR, saved_scbptr);

	/* Only unpause if we were the ones who paused. */
	if (paused == 0)
		ahc_unpause(ahc);
}
/**************************** Pathing Information *****************************/
/*
 * Build a devinfo description of the currently active connection by
 * reading the controller's latched identity and role registers.
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int	saved_scsiid;
	role_t	role;
	int	our_id;

	/* SSTAT0.TARGET tells us which role this connection runs in. */
	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS)
	   & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		/* Ultra2 parts keep the id in a different register. */
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	/* The peer's id/channel comes from the sequencer's saved scsiid. */
	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}
2237 struct ahc_phase_table_entry*
2238 ahc_lookup_phase_entry(int phase)
2240 struct ahc_phase_table_entry *entry;
2241 struct ahc_phase_table_entry *last_entry;
2244 * num_phases doesn't include the default entry which
2245 * will be returned if the phase doesn't match.
2247 last_entry = &ahc_phase_table[num_phases];
2248 for (entry = ahc_phase_table; entry < last_entry; entry++) {
2249 if (phase == entry->phase)
2250 break;
2252 return (entry);
2255 void
2256 ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
2257 u_int lun, char channel, role_t role)
2259 devinfo->our_scsiid = our_id;
2260 devinfo->target = target;
2261 devinfo->lun = lun;
2262 devinfo->target_offset = target;
2263 devinfo->channel = channel;
2264 devinfo->role = role;
2265 if (channel == 'B')
2266 devinfo->target_offset += 8;
2267 devinfo->target_mask = (0x01 << devinfo->target_offset);
2270 void
2271 ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
2273 printf("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
2274 devinfo->target, devinfo->lun);
2277 static void
2278 ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2279 struct scb *scb)
2281 role_t role;
2282 int our_id;
2284 our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
2285 role = ROLE_INITIATOR;
2286 if ((scb->flags & SCB_TARGET_SCB) != 0)
2287 role = ROLE_TARGET;
2288 ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
2289 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
2293 /************************ Message Phase Processing ****************************/
2294 static void
2295 ahc_assert_atn(struct ahc_softc *ahc)
2297 u_int scsisigo;
2299 scsisigo = ATNO;
2300 if ((ahc->features & AHC_DT) == 0)
2301 scsisigo |= ahc_inb(ahc, SCSISIGI);
2302 ahc_outb(ahc, SCSISIGO, scsisigo);
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	/* Initial selection: lead with IDENTIFY (and a tag, if tagged). */
	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		if ((scb->hscb->control & TAG_ENB) != 0) {
			/* Queue-tag message: tag type byte then tag id. */
			ahc->msgout_buf[ahc->msgout_index++] =
			    scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & SCB_ABORT) != 0) {
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort%s Message Sent\n",
		       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
		/*
		 * Clear our selection hardware in advance of
		 * the busfree.  We may have an entry in the waiting
		 * Q for this target, and we don't want to go about
		 * selecting while we handle the busfree and blow it
		 * away.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
		/* Transfer negotiation pending: queue SDTR/WDTR/PPR. */
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		/*
		 * MK_MESSAGE was set but nothing explains why; this is a
		 * driver/firmware inconsistency we cannot recover from.
		 */
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	scb->hscb->control &= ~MK_MESSAGE;
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}
/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/*
	 * Filter our period based on the current connection.
	 * If we can't perform DT transfers on this segment (not in LVD
	 * mode for instance), then our decision to issue a PPR message
	 * may change.
	 */
	period = tinfo->goal.period;
	offset = tinfo->goal.offset;
	ppr_options = tinfo->goal.ppr_options;
	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		ppr_options = 0;
	rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
				       &ppr_options, devinfo->role);
	dowide = tinfo->curr.width != tinfo->goal.width;
	dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
	/*
	 * Only use PPR if we have options that need it, even if the device
	 * claims to support it.  There might be an expander in the way
	 * that doesn't.
	 */
	doppr = ppr_options != 0;

	/*
	 * curr == goal, so this is a forced renegotiation (e.g. after a
	 * check condition): redo whichever of wide/sync has a non-default
	 * goal.
	 */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.offset != 0;
	}

	if (!dowide && !dosync && !doppr) {
		/*
		 * Force async with a WDTR message if we have a wide bus,
		 * or just issue an SDTR with a 0 offset.
		 */
		if ((ahc->features & AHC_WIDE) != 0)
			dowide = 1;
		else
			dosync = 1;
		if (bootverbose) {
			ahc_print_devinfo(ahc, devinfo);
			printf("Ensuring async\n");
		}
	}

	/* Target initiated PPR is not allowed in the SCSI spec */
	if (devinfo->role == ROLE_TARGET)
		doppr = 0;

	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (doppr || (dosync && !dowide)) {

		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, tinfo, rate, &offset,
				    doppr ? tinfo->goal.width
					  : tinfo->curr.width,
				    devinfo->role);
		if (doppr) {
			/* PPR negotiates period, offset and width at once. */
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		/* Wide first; sync follows in a later negotiation pass. */
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}
2492 * Build a synchronous negotiation message in our message
2493 * buffer based on the input parameters.
2495 static void
2496 ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2497 u_int period, u_int offset)
2499 if (offset == 0)
2500 period = AHC_ASYNC_XFER_PERIOD;
2501 ahc->msgout_index += spi_populate_sync_msg(
2502 ahc->msgout_buf + ahc->msgout_index, period, offset);
2503 ahc->msgout_len += 5;
2504 if (bootverbose) {
2505 printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
2506 ahc_name(ahc), devinfo->channel, devinfo->target,
2507 devinfo->lun, period, offset);
2512 * Build a wide negotiation message in our message
2513 * buffer based on the input parameters.
2515 static void
2516 ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2517 u_int bus_width)
2519 ahc->msgout_index += spi_populate_width_msg(
2520 ahc->msgout_buf + ahc->msgout_index, bus_width);
2521 ahc->msgout_len += 4;
2522 if (bootverbose) {
2523 printf("(%s:%c:%d:%d): Sending WDTR %x\n",
2524 ahc_name(ahc), devinfo->channel, devinfo->target,
2525 devinfo->lun, bus_width);
2530 * Build a parallel protocol request message in our message
2531 * buffer based on the input parameters.
2533 static void
2534 ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
2535 u_int period, u_int offset, u_int bus_width,
2536 u_int ppr_options)
2538 if (offset == 0)
2539 period = AHC_ASYNC_XFER_PERIOD;
2540 ahc->msgout_index += spi_populate_ppr_msg(
2541 ahc->msgout_buf + ahc->msgout_index, period, offset,
2542 bus_width, ppr_options);
2543 ahc->msgout_len += 8;
2544 if (bootverbose) {
2545 printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
2546 "offset %x, ppr_options %x\n", ahc_name(ahc),
2547 devinfo->channel, devinfo->target, devinfo->lun,
2548 bus_width, period, offset, ppr_options);
/*
 * Clear any active message state, returning the message-out register
 * and sequencer flags to their idle values.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
		/*
		 * The target didn't care to respond to our
		 * message request, so clear ATN.
		 */
		ahc_outb(ahc, CLRSINT1, CLRATNO);
	}
	/* Park MSG_OUT on NOOP so a stale message is never re-sent. */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
	ahc_outb(ahc, SEQ_FLAGS2,
		 ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}
/*
 * Recover from a protocol violation reported by the sequencer.  Depending
 * on how far the connection progressed, either abort the offending
 * command with a message or, when no safe abort is possible, reset the
 * bus.
 */
static void
ahc_handle_proto_violation(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	struct	scb *scb;
	u_int	scbid;
	u_int	seq_flags;
	u_int	curphase;
	u_int	lastphase;
	int	found;

	ahc_fetch_devinfo(ahc, &devinfo);
	scbid = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scbid);
	seq_flags = ahc_inb(ahc, SEQ_FLAGS);
	curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
	lastphase = ahc_inb(ahc, LASTPHASE);
	if ((seq_flags & NOT_IDENTIFIED) != 0) {

		/*
		 * The reconnecting target either did not send an
		 * identify message, or did, but we didn't find an SCB
		 * to match.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("Target did not send an IDENTIFY message. "
		       "LASTPHASE = 0x%x.\n", lastphase);
		scb = NULL;
	} else if (scb == NULL) {
		/*
		 * We don't seem to have an SCB active for this
		 * transaction. Print an error and reset the bus.
		 */
		ahc_print_devinfo(ahc, &devinfo);
		printf("No SCB found during protocol violation\n");
		goto proto_violation_reset;
	} else {
		ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
		if ((seq_flags & NO_CDB_SENT) != 0) {
			ahc_print_path(ahc, scb);
			printf("No or incomplete CDB sent to device.\n");
		} else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
			/*
			 * The target never bothered to provide status to
			 * us prior to completing the command. Since we don't
			 * know the disposition of this command, we must attempt
			 * to abort it. Assert ATN and prepare to send an abort
			 * message.
			 */
			ahc_print_path(ahc, scb);
			printf("Completed command without status.\n");
		} else {
			ahc_print_path(ahc, scb);
			printf("Unknown protocol violation.\n");
			ahc_dump_card_state(ahc);
		}
	}
	if ((lastphase & ~P_DATAIN_DT) == 0
	 || lastphase == P_COMMAND) {
proto_violation_reset:
		/*
		 * Target either went directly to data/command
		 * phase or didn't respond to our ATN.
		 * The only safe thing to do is to blow
		 * it away with a bus reset.
		 */
		found = ahc_reset_channel(ahc, 'A', TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
	} else {
		/*
		 * Leave the selection hardware off in case
		 * this abort attempt will affect yet to
		 * be sent commands.
		 */
		ahc_outb(ahc, SCSISEQ,
			 ahc_inb(ahc, SCSISEQ) & ~ENSELO);
		ahc_assert_atn(ahc);
		ahc_outb(ahc, MSG_OUT, HOST_MSG);
		if (scb == NULL) {
			/* No SCB to tag the abort to: send ABORT TASK
			 * directly from the host message buffer. */
			ahc_print_devinfo(ahc, &devinfo);
			ahc->msgout_buf[0] = MSG_ABORT_TASK;
			ahc->msgout_len = 1;
			ahc->msgout_index = 0;
			ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		} else {
			/* Flag the SCB so the msgout path sends an abort. */
			ahc_print_path(ahc, scb);
			scb->flags |= SCB_ABORT;
		}
		printf("Protocol violation %s. Attempting to abort.\n",
		       ahc_lookup_phase_entry(curphase)->phasemsg);
	}
}
/*
 * Manual message loop handler.  Drives one byte of a message-in or
 * message-out phase per invocation (one REQ/ACK handshake), in either
 * initiator or target role, and tells the sequencer via RETURN_1
 * whether to re-enter the loop or resume normal operation.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_OUT");
		}
#endif
		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this messages is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* A pending parity-error report preempts normal output. */
		if (ahc->send_msg_perror) {
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
				printf(" byte 0x%x\n", ahc->send_msg_perror);
#endif
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone	= ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_assert_atn(ahc);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgout_buf[ahc->msgout_index]);
#endif
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
			ahc_print_devinfo(ahc, &devinfo);
			printf("INITIATOR_MSG_IN");
		}
#endif
		phasemis = bus_phase != P_MESGIN;
		if (phasemis) {
#ifdef AHC_DEBUG
			if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
				printf(" PHASEMIS %s\n",
				       ahc_lookup_phase_entry(bus_phase)
							     ->phasemsg);
			}
#endif
			ahc->msgin_index = 0;
			/*
			 * Switch to message-out only if we actually have
			 * something to send (a parity report, or an
			 * unsent queued message).
			 */
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
#ifdef AHC_DEBUG
		if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
			printf(" byte 0x%x\n",
			       ahc->msgin_buf[ahc->msgin_index]);
#endif

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0) {
#ifdef AHC_DEBUG
				if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
					ahc_print_devinfo(ahc, &devinfo);
					printf("Asserting ATN for response\n");
				}
#endif
				ahc_assert_atn(ahc);
			}
		} else
			ahc->msgin_index++;

		if (message_done == MSGLOOP_TERMINATED) {
			end_session = TRUE;
		} else {
			/* Ack the byte */
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_inb(ahc, SCSIDATL);
		}
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ. So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this messages is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/* Message fully sent; stop manual PIO and exit. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free. The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 * and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}
/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message. If "full" is false, return true if the target saw at
 * least the first byte of the message.
 *
 * Works by re-walking the outgoing message buffer and comparing each
 * message's position against msgout_index (the number of bytes the
 * target has actually ACKed).
 */
static int
ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	while (index < ahc->msgout_len) {
		if (ahc->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* buf[index+1] is the extended message's length. */
			end_index = index + 1 + ahc->msgout_buf[index + 1];
			if (ahc->msgout_buf[index+2] == msgval
			 && type == AHCMSG_EXT) {

				if (full) {
					if (ahc->msgout_index > end_index)
						found = TRUE;
				} else if (ahc->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHCMSG_1B
			 && ahc->msgout_buf[index] == msgval
			 && ahc->msgout_index > index)
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}
/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 *
 * Called once per received message byte; returns MSGLOOP_IN_PROG until
 * enough bytes have arrived, then MSGLOOP_MSGCOMPLETE (message handled,
 * possibly with a queued response) or MSGLOOP_TERMINATED (hand the
 * message back to the sequencer / connection ended).
 */
static int
ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;
	u_int	targ_scsirate;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	targ_scsirate = tinfo->scsirate;

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it. When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahc->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahc_handle_msg_reject(ahc, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahc->msgin_index < 2)
			break;
		switch (ahc->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	ppr_options;
			u_int	offset;
			u_int	saved_offset;

			if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahc->msgin_buf[4];
			/* Clamp the request to what this hw/bus can do. */
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate, &offset,
					    targ_scsirate & WIDEXFER,
					    devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahc->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_sdtr(ahc, devinfo,
						   period, offset);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahc->msgin_buf[3];
			saved_width = bus_width;
			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printf("(%s:%c:%d:%d): requested %dBit "
					       "transfers. Rejecting...\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printf("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_wdtr(ahc, devinfo, bus_width);
				ahc->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			/*
			 * After a wide message, we are async, but
			 * some devices don't seem to honor this portion
			 * of the spec. Force a renegotiation of the
			 * sync component of our transfer agreement even
			 * if our goal is async. By updating our width
			 * after forcing the negotiation, we avoid
			 * renegotiating for width.
			 */
			ahc_update_neg_request(ahc, devinfo, tstate,
					       tinfo, AHC_NEG_ALWAYS);
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {

				/*
				 * We will always have an SDTR to send.
				 */
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_build_transfer_msg(ahc, devinfo);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			struct	ahc_syncrate *syncrate;
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahc->msgin_buf[3];
			offset = ahc->msgin_buf[5];
			bus_width = ahc->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahc->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period == 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Mask out any options we don't support
			 * on any controller. Transfer options are
			 * only available if we are negotiating wide.
			 */
			ppr_options &= MSG_EXT_PPR_DT_REQ;
			if (bus_width == 0)
				ppr_options = 0;

			ahc_validate_width(ahc, tinfo, &bus_width,
					   devinfo->role);
			syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
							   &ppr_options,
							   devinfo->role);
			ahc_validate_offset(ahc, tinfo, syncrate,
					    &offset, bus_width,
					    devinfo->role);

			if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
					syncrate = NULL;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printf("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printf("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahc_name(ahc), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahc->msgout_index = 0;
				ahc->msgout_len = 0;
				ahc_construct_ppr(ahc, devinfo, period, offset,
						  bus_width, ppr_options);
				ahc->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printf("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahc_name(ahc), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahc->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahc_set_width(ahc, devinfo, bus_width,
				      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				      /*paused*/TRUE);
			ahc_set_syncrate(ahc, devinfo,
					 syncrate, period,
					 offset, ppr_options,
					 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
					 /*paused*/TRUE);
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message. Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHC_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahc_handle_devreset(ahc, devinfo,
				    CAM_BDR_SENT,
				    "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahc_inb(ahc, INITIATOR_TAG);
		ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		/* Notify any enabled lun of the abort event. */
		tstate = ahc->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahc_queue_lstate_event(ahc, lstate,
						       devinfo->our_scsiid,
						       ahc->msgin_buf[0],
						       /*arg*/tag);
				ahc_send_lstate_events(ahc, lstate);
			}
		}
		ahc_restart(ahc);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahc->msgout_index = 0;
		ahc->msgout_len = 1;
		ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahc->msgout_len = 0;

	return (done);
}
/*
 * Process a message reject message.
 *
 * A reject most often signals that the target refused our last
 * negotiation attempt.  Walk the possibilities in order of what we
 * may have sent (PPR, then WDTR, then SDTR, then a queue tag) and
 * fall back to the next-less-capable mode in each case.
 *
 * Returns non-zero if a response message was queued for the target.
 */
static int
ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 */
	struct scb *scb;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	u_int scb_index;
	u_int last_msg;
	int   response = 0;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahc_inb(ahc, LAST_MSG);

	if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		/*
		 * Target does not support the PPR message.
		 * Attempt to negotiate SPI-2 style.
		 */
		if (bootverbose) {
			printf("(%s:%c:%d:%d): PPR Rejected. "
			       "Trying WDTR/SDTR\n",
			       ahc_name(ahc), devinfo->channel,
			       devinfo->target, devinfo->lun);
		}
		tinfo->goal.ppr_options = 0;
		tinfo->curr.transport_version = 2;
		tinfo->goal.transport_version = 2;
		ahc->msgout_index = 0;
		ahc->msgout_len = 0;
		ahc_build_transfer_msg(ahc, devinfo);
		ahc->msgout_index = 0;
		response = 1;
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {

		/* note 8bit xfers */
		printf("(%s:%c:%d:%d): refuses WIDE negotiation.  Using "
		       "8bit transfers\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset != tinfo->curr.offset) {

			/* Start the sync negotiation */
			ahc->msgout_index = 0;
			ahc->msgout_len = 0;
			ahc_build_transfer_msg(ahc, devinfo);
			ahc->msgout_index = 0;
			response = 1;
		}
	} else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
				 /*offset*/0, /*ppr_options*/0,
				 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
				 /*paused*/TRUE);
		printf("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahc_name(ahc), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printf("(%s:%c:%d:%d): refuses tagged commands.  "
			       "Performing non-tagged I/O\n", ahc_name(ahc),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printf("(%s:%c:%d:%d): refuses %s tagged commands.  "
			       "Performing simple queue tagged I/O only\n",
			       ahc_name(ahc), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahc_outb(ahc, SCB_CONTROL,
			 ahc_inb(ahc, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		ahc_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
		ahc_assert_atn(ahc);

		/*
		 * This transaction is now at the head of
		 * the untagged queue for this target.
		 */
		if ((ahc->flags & AHC_SCB_BTT) == 0) {
			struct scb_tailq *untagged_q;

			untagged_q =
			    &(ahc->untagged_queues[devinfo->target_offset]);
			TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
			scb->flags |= SCB_UNTAGGEDQ;
		}
		ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     scb->hscb->tag);

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
				   SCB_GET_CHANNEL(ahc, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printf("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}
/*
 * Process an ignore wide residue message.
 *
 * The target reports that the final byte of the last wide (16-bit)
 * data transfer was padding.  Back the residual S/G pointer and count
 * up by one byte so completion accounting stays correct, walking back
 * a segment if the extra byte fell on a segment boundary.
 */
static void
ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
	 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			struct ahc_dma_seg *sg;
			uint32_t data_cnt;
			uint32_t data_addr;
			uint32_t sglen;

			/* Pull in all of the sgptr */
			sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
			data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);

			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHC_SG_LEN_MASK;
			}

			data_addr = ahc_inl(ahc, SHADDR);

			/* Give back the ignored byte. */
			data_cnt += 1;
			data_addr -= 1;
			sgptr &= SG_PTR_MASK;

			sg = ahc_sg_bus_to_virt(scb, sgptr);

			/*
			 * The residual sg ptr points to the next S/G
			 * to load so we must go back one.
			 */
			sg--;
			sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
			if (sg != scb->sg_list
			 && sglen < (data_cnt & AHC_SG_LEN_MASK)) {

				/* The extra byte belongs to the prior segment. */
				sg--;
				sglen = ahc_le32toh(sg->len);
				/*
				 * Preserve High Address and SG_LIST bits
				 * while setting the count to 1.
				 */
				data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
				data_addr = ahc_le32toh(sg->addr)
					  + (sglen & AHC_SG_LEN_MASK) - 1;

				/*
				 * Increment sg so it points to the
				 * "next" sg.
				 */
				sg++;
				sgptr = ahc_sg_virt_to_bus(scb, sg);
			}
			ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
			ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue.  This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahc_outb(ahc, SCB_LUN,
				 ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
		}
	}
}
/*
 * Reinitialize the data pointers for the active transfer
 * based on its current residual.
 *
 * Reads the residual S/G pointer and byte count out of the hardware
 * SCB, recomputes the host address for the interrupted segment, and
 * reloads the HADDR/HCNT (and pre-Ultra2 STCNT) DMA registers so the
 * sequencer can resume the transfer where it left off.
 */
static void
ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
{
	struct	 scb *scb;
	struct	 ahc_dma_seg *sg;
	u_int	 scb_index;
	uint32_t sgptr;
	uint32_t resid;
	uint32_t dataptr;

	scb_index = ahc_inb(ahc, SCB_TAG);
	scb = ahc_lookup_scb(ahc, scb_index);
	/* Assemble the 32-bit residual S/G pointer byte-by-byte. */
	sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
	      |	ahc_inb(ahc, SCB_RESIDUAL_SGPTR);

	sgptr &= SG_PTR_MASK;
	sg = ahc_sg_bus_to_virt(scb, sgptr);

	/* The residual sg_ptr always points to the next sg */
	sg--;

	/* Residual byte count is stored as a 24-bit quantity. */
	resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
	      | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
	      | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);

	/* Restart at (segment end - residual). */
	dataptr = ahc_le32toh(sg->addr)
		+ (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
		- resid;
	if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
		u_int dscommand1;

		/*
		 * Bits 32-38 of the address live in the high byte of
		 * sg->len; latch them via the HADDLDSEL0 window.
		 */
		dscommand1 = ahc_inb(ahc, DSCOMMAND1);
		ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
		ahc_outb(ahc, HADDR,
			 (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
		ahc_outb(ahc, DSCOMMAND1, dscommand1);
	}
	ahc_outb(ahc, HADDR + 3, dataptr >> 24);
	ahc_outb(ahc, HADDR + 2, dataptr >> 16);
	ahc_outb(ahc, HADDR + 1, dataptr >> 8);
	ahc_outb(ahc, HADDR, dataptr);
	ahc_outb(ahc, HCNT + 2, resid >> 16);
	ahc_outb(ahc, HCNT + 1, resid >> 8);
	ahc_outb(ahc, HCNT, resid);
	if ((ahc->features & AHC_ULTRA2) == 0) {
		ahc_outb(ahc, STCNT + 2, resid >> 16);
		ahc_outb(ahc, STCNT + 1, resid >> 8);
		ahc_outb(ahc, STCNT, resid);
	}
}
/*
 * Handle the effects of issuing a bus device reset message.
 *
 * Aborts every SCB queued to the device, notifies any target-mode
 * peripheral drivers, drops the device back to async/narrow transfers
 * for renegotiation, and (unless the reset stemmed from a selection
 * timeout) posts an AC_SENT_BDR async event to the OSM.
 */
static void
ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		    cam_status status, char *message, int verbose_level)
{
#ifdef AHC_TARGET_MODE
	struct ahc_tmode_tstate* tstate;
	u_int lun;
#endif
	int found;

	found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
			       status);

#ifdef AHC_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target mode peripheral
	 * drivers affected by this action.
	 */
	tstate = ahc->enabled_targets[devinfo->our_scsiid];
	if (tstate != NULL) {
		for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
			struct ahc_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
					       MSG_BUS_DEV_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif

	/*
	 * Go back to async/narrow transfers and renegotiate.
	 */
	ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
		      AHC_TRANS_CUR, /*paused*/TRUE);
	ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
			 /*period*/0, /*offset*/0, /*ppr_options*/0,
			 AHC_TRANS_CUR, /*paused*/TRUE);

	if (status != CAM_SEL_TIMEOUT)
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_SENT_BDR);

	if (message != NULL
	 && (verbose_level <= bootverbose))
		printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
		       message, devinfo->channel, devinfo->target, found);
}
#ifdef AHC_TARGET_MODE
/*
 * Prepare the outgoing message buffer for a target-mode MESSAGE IN
 * phase.  Panics if no negotiation message is pending, since the
 * sequencer only signals us in the AWAITING-message state.
 */
static void
ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		       struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
		ahc_build_transfer_msg(ahc, devinfo);
	else
		panic("ahc_intr: AWAITING target message with no message");

	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
}
#endif
/**************************** Initialization **********************************/

/*
 * Allocate a controller structure for a new device
 * and perform initial initialization.
 *
 * Ownership of "name" transfers to the softc (freed on failure and in
 * ahc_free()).  On non-FreeBSD platforms the softc itself is allocated
 * here; on FreeBSD it is owned by the device framework.
 * Returns NULL on any allocation or platform-setup failure.
 */
struct ahc_softc *
ahc_alloc(void *platform_arg, char *name)
{
	struct  ahc_softc *ahc;
	int	i;

#ifndef	__FreeBSD__
	ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT);
	if (!ahc) {
		printf("aic7xxx: cannot malloc softc!\n");
		free(name, M_DEVBUF);
		return NULL;
	}
#else
	ahc = device_get_softc((device_t)platform_arg);
#endif
	memset(ahc, 0, sizeof(*ahc));
	ahc->seep_config = malloc(sizeof(*ahc->seep_config),
				  M_DEVBUF, M_NOWAIT);
	if (ahc->seep_config == NULL) {
#ifndef	__FreeBSD__
		free(ahc, M_DEVBUF);
#endif
		free(name, M_DEVBUF);
		return (NULL);
	}
	LIST_INIT(&ahc->pending_scbs);
	/* We don't know our unit number until the OSM sets it */
	ahc->name = name;
	ahc->unit = -1;
	ahc->description = NULL;
	ahc->channel = 'A';
	ahc->channel_b = 'B';
	ahc->chip = AHC_NONE;
	ahc->features = AHC_FENONE;
	ahc->bugs = AHC_BUGNONE;
	ahc->flags = AHC_FNONE;
	/*
	 * Default to all error reporting enabled with the
	 * sequencer operating at its fastest speed.
	 * The bus attach code may modify this.
	 */
	ahc->seqctl = FASTMODE;

	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);
	if (ahc_platform_alloc(ahc, platform_arg) != 0) {
		/* ahc_free() releases seep_config, name, and the softc. */
		ahc_free(ahc);
		ahc = NULL;
	}
	return (ahc);
}
/*
 * Second-stage softc initialization, run once register access is
 * available: derive the pause/unpause HCNTRL values and allocate the
 * (possibly shared) SCB data area.  Returns 0 or ENOMEM.
 */
int
ahc_softc_init(struct ahc_softc *ahc)
{

	/* The IRQMS bit is only valid on VL and EISA chips */
	if ((ahc->chip & AHC_PCI) == 0)
		ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
	else
		ahc->unpause = 0;
	ahc->pause = ahc->unpause | PAUSE;
	/* XXX The shared scb data stuff should be deprecated */
	if (ahc->scb_data == NULL) {
		ahc->scb_data = malloc(sizeof(*ahc->scb_data),
				       M_DEVBUF, M_NOWAIT);
		if (ahc->scb_data == NULL)
			return (ENOMEM);
		memset(ahc->scb_data, 0, sizeof(*ahc->scb_data));
	}

	return (0);
}
/*
 * Record the OSM-assigned unit (instance) number for this controller.
 */
void
ahc_set_unit(struct ahc_softc *ahc, int unit)
{
	ahc->unit = unit;
}
/*
 * Install a new controller name string.  Ownership of "name" passes
 * to the softc; any previously installed name is released here.
 */
void
ahc_set_name(struct ahc_softc *ahc, char *name)
{
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	ahc->name = name;
}
/*
 * Tear down a controller instance.  The init_level switch unwinds
 * exactly as far as initialization progressed; each case deliberately
 * falls through to release the resources of the earlier levels.
 */
void
ahc_free(struct ahc_softc *ahc)
{
	int i;

	switch (ahc->init_level) {
	default:
	case 5:
		ahc_shutdown(ahc);
		/* FALLTHROUGH */
	case 4:
		ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
				  ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
				ahc->shared_data_dmamap);
		ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
				   ahc->shared_data_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
		/* FALLTHROUGH */
	case 1:
#ifndef __linux__
		ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
#endif
		break;
	case 0:
		break;
	}

#ifndef __linux__
	ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
#endif
	ahc_platform_free(ahc);
	ahc_fini_scbdata(ahc);
	for (i = 0; i < AHC_NUM_TARGETS; i++) {
		struct ahc_tmode_tstate *tstate;

		tstate = ahc->enabled_targets[i];
		if (tstate != NULL) {
#ifdef AHC_TARGET_MODE
			int j;

			for (j = 0; j < AHC_NUM_LUNS; j++) {
				struct ahc_tmode_lstate *lstate;

				lstate = tstate->enabled_luns[j];
				if (lstate != NULL) {
					xpt_free_path(lstate->path);
					free(lstate, M_DEVBUF);
				}
			}
#endif
			free(tstate, M_DEVBUF);
		}
	}
#ifdef AHC_TARGET_MODE
	if (ahc->black_hole != NULL) {
		xpt_free_path(ahc->black_hole->path);
		free(ahc->black_hole, M_DEVBUF);
	}
#endif
	if (ahc->name != NULL)
		free(ahc->name, M_DEVBUF);
	if (ahc->seep_config != NULL)
		free(ahc->seep_config, M_DEVBUF);
#ifndef __FreeBSD__
	/* On FreeBSD the softc belongs to the device framework. */
	free(ahc, M_DEVBUF);
#endif
	return;
}
/*
 * Quiesce the controller: reset the chip without reinitializing it,
 * then clear SCSISEQ, SXFRCTL0, DSPCISTATUS and the per-target
 * transfer-rate registers so the adapter is left idle on the bus.
 */
void
ahc_shutdown(void *arg)
{
	struct	ahc_softc *ahc;
	int	i;

	ahc = (struct ahc_softc *)arg;

	/* This will reset most registers to 0, but not all */
	ahc_reset(ahc, /*reinit*/FALSE);
	ahc_outb(ahc, SCSISEQ, 0);
	ahc_outb(ahc, SXFRCTL0, 0);
	ahc_outb(ahc, DSPCISTATUS, 0);

	for (i = TARG_SCSIRATE; i < SCSICONF; i++)
		ahc_outb(ahc, i, 0);
}
/*
 * Reset the controller and record some information about it
 * that is only available just after a reset.  If "reinit" is
 * non-zero, this reset occurred after initial configuration
 * and the caller requests that the chip be fully reinitialized
 * to a runnable state.  Chip interrupts are *not* enabled after
 * a reinitialization.  The caller must enable interrupts via
 * ahc_intr_enable().
 *
 * Returns 0 on success, -1 for an unsupported channel configuration,
 * or the bus_chip_init() error code when reinitializing.
 */
int
ahc_reset(struct ahc_softc *ahc, int reinit)
{
	u_int	sblkctl;
	u_int	sxfrctl1_a, sxfrctl1_b;
	int	error;
	int	wait;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahc_pause(ahc);
	sxfrctl1_b = 0;
	if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
		u_int sblkctl;

		/*
		 * Save channel B's settings in case this chip
		 * is setup for TWIN channel operation.
		 */
		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);

	ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahc_delay(1000);
	} while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printf("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahc_name(ahc));
	}
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/* Determine channel configuration */
	sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
	/* No Twin Channel PCI cards */
	if ((ahc->chip & AHC_PCI) != 0)
		sblkctl &= ~SELBUSB;
	switch (sblkctl) {
	case 0:
		/* Single Narrow Channel */
		break;
	case 2:
		/* Wide Channel */
		ahc->features |= AHC_WIDE;
		break;
	case 8:
		/* Twin Channel */
		ahc->features |= AHC_TWIN;
		break;
	default:
		printf(" Unsupported adapter type.  Ignoring\n");
		return(-1);
	}

	/*
	 * Reload sxfrctl1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	if ((ahc->features & AHC_TWIN) != 0) {
		u_int sblkctl;

		sblkctl = ahc_inb(ahc, SBLKCTL);
		ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
		ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
		ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
	}
	ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);

	error = 0;
	if (reinit != 0)
		/*
		 * If a recovery action has forced a chip reset,
		 * re-initialize the chip to our liking.
		 */
		error = ahc->bus_chip_init(ahc);
#ifdef AHC_DUMP_SEQ
	else
		ahc_dumpseq(ahc);
#endif

	return (error);
}
/*
 * Determine the number of SCBs available on the controller
 *
 * Probes by writing each candidate index into SCB storage and reading
 * it back; the first index that fails to read back (or that aliases
 * slot 0) marks the end of real SCB RAM.
 */
int
ahc_probe_scbs(struct ahc_softc *ahc) {
	int i;

	for (i = 0; i < AHC_SCB_MAX; i++) {

		ahc_outb(ahc, SCBPTR, i);
		ahc_outb(ahc, SCB_BASE, i);
		if (ahc_inb(ahc, SCB_BASE) != i)
			break;
		/* Verify slot i is distinct storage from slot 0. */
		ahc_outb(ahc, SCBPTR, 0);
		if (ahc_inb(ahc, SCB_BASE) != 0)
			break;
	}
	return (i);
}
4151 static void
4152 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
4154 dma_addr_t *baddr;
4156 baddr = (dma_addr_t *)arg;
4157 *baddr = segs->ds_addr;
/*
 * Initialize every hardware SCB on the chip and link them into the
 * sequencer's free list (only when SCB paging is enabled; otherwise
 * the free list is left empty).
 */
static void
ahc_build_free_scb_list(struct ahc_softc *ahc)
{
	int scbsize;
	int i;

	/* Large SCBs are 64 bytes; standard SCBs are 32. */
	scbsize = 32;
	if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
		scbsize = 64;

	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		int j;

		ahc_outb(ahc, SCBPTR, i);

		/*
		 * Touch all SCB bytes to avoid parity errors
		 * should one of our debugging routines read
		 * an otherwise uninitialized byte.
		 */
		for (j = 0; j < scbsize; j++)
			ahc_outb(ahc, SCB_BASE+j, 0xFF);

		/* Clear the control byte. */
		ahc_outb(ahc, SCB_CONTROL, 0);

		/* Set the next pointer */
		if ((ahc->flags & AHC_PAGESCBS) != 0)
			ahc_outb(ahc, SCB_NEXT, i+1);
		else
			ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);

		/* Make the tag number, SCSIID, and lun invalid */
		ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
		ahc_outb(ahc, SCB_SCSIID, 0xFF);
		ahc_outb(ahc, SCB_LUN, 0xFF);
	}

	if ((ahc->flags & AHC_PAGESCBS) != 0) {
		/* SCB 0 heads the free list. */
		ahc_outb(ahc, FREE_SCBH, 0);
	} else {
		/* No free list. */
		ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
	}

	/* Make sure that the last SCB terminates the free list */
	ahc_outb(ahc, SCBPTR, i-1);
	ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
}
/*
 * Allocate and initialize all software/hardware SCB resources: the
 * software SCB array, the DMA tags and permanently-mapped memory for
 * hardware SCBs, sense buffers, and S/G lists, plus the initial SCB
 * pool.  scb_data->init_level tracks progress so ahc_fini_scbdata()
 * can unwind a partial setup after an error return.
 *
 * Returns 0, ENOMEM on allocation failure, or ENXIO when the chip
 * reports no SCB space.
 */
static int
ahc_init_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	SLIST_INIT(&scb_data->free_scbs);
	SLIST_INIT(&scb_data->sg_maps);

	/* Allocate SCB resources */
	scb_data->scbarray =
	    (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC,
				 M_DEVBUF, M_NOWAIT);
	if (scb_data->scbarray == NULL)
		return (ENOMEM);
	memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC);

	/* Determine the number of hardware SCBs and initialize them */

	scb_data->maxhscbs = ahc_probe_scbs(ahc);
	if (ahc->scb_data->maxhscbs == 0) {
		printf("%s: No SCB space found\n", ahc_name(ahc));
		return (ENXIO);
	}

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for our hardware scb structures */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->hscb_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocation for our hscbs */
	if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
			     (void **)&scb_data->hscbs,
			     BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
			scb_data->hscbs,
			AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
			ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our sense buffers */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sense_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Allocate them */
	if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
			     (void **)&scb_data->sense,
			     BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* And permanently map them */
	ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
			scb_data->sense,
			AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
			ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);

	scb_data->init_level++;

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
			       /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       PAGE_SIZE, /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &scb_data->sg_dmat) != 0) {
		goto error_exit;
	}

	scb_data->init_level++;

	/* Perform initial CCB allocation */
	memset(scb_data->hscbs, 0,
	       AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
	ahc_alloc_scbs(ahc);

	if (scb_data->numscbs == 0) {
		printf("%s: ahc_init_scbdata - "
		       "Unable to allocate initial scbs\n",
		       ahc_name(ahc));
		goto error_exit;
	}

	/*
	 * Reserve the next queued SCB.
	 */
	ahc->next_queued_scb = ahc_get_scb(ahc);

	/*
	 * Note that we were successful
	 */
	return (0);

error_exit:

	return (ENOMEM);
}
/*
 * Release all SCB resources, unwinding only as far as init_level
 * indicates ahc_init_scbdata() progressed.  Each case deliberately
 * falls through to free the resources of the earlier levels.
 */
static void
ahc_fini_scbdata(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;

	scb_data = ahc->scb_data;
	if (scb_data == NULL)
		return;

	switch (scb_data->init_level) {
	default:
	case 7:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
			SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
			ahc_dmamap_unload(ahc, scb_data->sg_dmat,
					  sg_map->sg_dmamap);
			ahc_dmamem_free(ahc, scb_data->sg_dmat,
					sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
	}
	/* FALLTHROUGH */
	case 6:
		ahc_dmamap_unload(ahc, scb_data->sense_dmat,
				  scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 5:
		ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
				scb_data->sense_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
				   scb_data->sense_dmamap);
		/* FALLTHROUGH */
	case 4:
		ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
		/* FALLTHROUGH */
	case 3:
		ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
				  scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 2:
		ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
				scb_data->hscb_dmamap);
		ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
				   scb_data->hscb_dmamap);
		/* FALLTHROUGH */
	case 1:
		ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
		break;
	case 0:
		break;
	}
	if (scb_data->scbarray != NULL)
		free(scb_data->scbarray, M_DEVBUF);
}
/*
 * Grow the pool of software SCBs by one page worth of S/G lists.
 * Each new SCB is paired with a hardware SCB slot and an AHC_NSEG
 * S/G array carved from the freshly mapped page, then pushed onto
 * the free list.  Allocation failures simply stop the batch early;
 * SCBs already completed remain usable.
 */
void
ahc_alloc_scbs(struct ahc_softc *ahc)
{
	struct scb_data *scb_data;
	struct scb *next_scb;
	struct sg_map_node *sg_map;
	dma_addr_t physaddr;
	struct ahc_dma_seg *segs;
	int newcount;
	int i;

	scb_data = ahc->scb_data;
	if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
		/* Can't allocate any more */
		return;

	next_scb = &scb_data->scbarray[scb_data->numscbs];

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return;

	/* Allocate S/G space for the next batch of SCBS */
	if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
			     (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return;
	}

	SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);

	ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
			sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
			&sg_map->sg_physaddr, /*flags*/0);

	segs = sg_map->sg_vaddr;
	physaddr = sg_map->sg_physaddr;

	/* As many SCBs as fit in the page, capped by the global limit. */
	newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
	newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
	for (i = 0; i < newcount; i++) {
		struct scb_platform_data *pdata;
#ifndef	__linux__
		int error;
#endif
		pdata = (struct scb_platform_data *)malloc(sizeof(*pdata),
							   M_DEVBUF, M_NOWAIT);
		if (pdata == NULL)
			break;
		next_scb->platform_data = pdata;
		next_scb->sg_map = sg_map;
		next_scb->sg_list = segs;
		/*
		 * The sequencer always starts with the second entry.
		 * The first entry is embedded in the scb.
		 */
		next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
		next_scb->ahc_softc = ahc;
		next_scb->flags = SCB_FREE;
#ifndef	__linux__
		error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
					  &next_scb->dmamap);
		if (error != 0)
			break;
#endif
		next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
		next_scb->hscb->tag = ahc->scb_data->numscbs;
		SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
				  next_scb, links.sle);
		segs += AHC_NSEG;
		physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
		next_scb++;
		ahc->scb_data->numscbs++;
	}
}
/*
 * Format a human-readable description of the controller (chip name,
 * channel/speed/width, SCSI IDs, and SCB counts) into "buf".
 * NOTE(review): buf has no length parameter — callers must supply a
 * buffer large enough for the longest description; verify at call
 * sites before reusing this elsewhere.
 */
void
ahc_controller_info(struct ahc_softc *ahc, char *buf)
{
	int len;

	len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
	buf += len;
	if ((ahc->features & AHC_TWIN) != 0)
		len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
			      "B SCSI Id=%d, primary %c, ",
			      ahc->our_id, ahc->our_id_b,
			      (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
	else {
		const char *speed;
		const char *type;

		speed = "";
		if ((ahc->features & AHC_ULTRA) != 0) {
			speed = "Ultra ";
		} else if ((ahc->features & AHC_DT) != 0) {
			speed = "Ultra160 ";
		} else if ((ahc->features & AHC_ULTRA2) != 0) {
			speed = "Ultra2 ";
		}
		if ((ahc->features & AHC_WIDE) != 0) {
			type = "Wide";
		} else {
			type = "Single";
		}
		len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
			      speed, type, ahc->channel, ahc->our_id);
	}
	buf += len;

	if ((ahc->flags & AHC_PAGESCBS) != 0)
		sprintf(buf, "%d/%d SCBs",
			ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
	else
		sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
}
4525 ahc_chip_init(struct ahc_softc *ahc)
4527 int term;
4528 int error;
4529 u_int i;
4530 u_int scsi_conf;
4531 u_int scsiseq_template;
4532 uint32_t physaddr;
4534 ahc_outb(ahc, SEQ_FLAGS, 0);
4535 ahc_outb(ahc, SEQ_FLAGS2, 0);
4537 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
4538 if (ahc->features & AHC_TWIN) {
4541 * Setup Channel B first.
4543 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
4544 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
4545 ahc_outb(ahc, SCSIID, ahc->our_id_b);
4546 scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4547 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4548 |term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
4549 if ((ahc->features & AHC_ULTRA2) != 0)
4550 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4551 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4552 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4554 /* Select Channel A */
4555 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
4557 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
4558 if ((ahc->features & AHC_ULTRA2) != 0)
4559 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
4560 else
4561 ahc_outb(ahc, SCSIID, ahc->our_id);
4562 scsi_conf = ahc_inb(ahc, SCSICONF);
4563 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
4564 |term|ahc->seltime
4565 |ENSTIMER|ACTNEGEN);
4566 if ((ahc->features & AHC_ULTRA2) != 0)
4567 ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
4568 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
4569 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
4571 /* There are no untagged SCBs active yet. */
4572 for (i = 0; i < 16; i++) {
4573 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
4574 if ((ahc->flags & AHC_SCB_BTT) != 0) {
4575 int lun;
4578 * The SCB based BTT allows an entry per
4579 * target and lun pair.
4581 for (lun = 1; lun < AHC_NUM_LUNS; lun++)
4582 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
4586 /* All of our queues are empty */
4587 for (i = 0; i < 256; i++)
4588 ahc->qoutfifo[i] = SCB_LIST_NULL;
4589 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
4591 for (i = 0; i < 256; i++)
4592 ahc->qinfifo[i] = SCB_LIST_NULL;
4594 if ((ahc->features & AHC_MULTI_TID) != 0) {
4595 ahc_outb(ahc, TARGID, 0);
4596 ahc_outb(ahc, TARGID + 1, 0);
4600 * Tell the sequencer where it can find our arrays in memory.
4602 physaddr = ahc->scb_data->hscb_busaddr;
4603 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
4604 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
4605 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
4606 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
4608 physaddr = ahc->shared_data_busaddr;
4609 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
4610 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
4611 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
4612 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
4615 * Initialize the group code to command length table.
4616 * This overrides the values in TARG_SCSIRATE, so only
4617 * setup the table after we have processed that information.
4619 ahc_outb(ahc, CMDSIZE_TABLE, 5);
4620 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
4621 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
4622 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
4623 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
4624 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
4625 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
4626 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
4628 if ((ahc->features & AHC_HS_MAILBOX) != 0)
4629 ahc_outb(ahc, HS_MAILBOX, 0);
4631 /* Tell the sequencer of our initial queue positions */
4632 if ((ahc->features & AHC_TARGETMODE) != 0) {
4633 ahc->tqinfifonext = 1;
4634 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
4635 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
4637 ahc->qinfifonext = 0;
4638 ahc->qoutfifonext = 0;
4639 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
4640 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
4641 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
4642 ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
4643 ahc_outb(ahc, SDSCB_QOFF, 0);
4644 } else {
4645 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
4646 ahc_outb(ahc, QINPOS, ahc->qinfifonext);
4647 ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
4650 /* We don't have any waiting selections */
4651 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
4653 /* Our disconnection list is empty too */
4654 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
4656 /* Message out buffer starts empty */
4657 ahc_outb(ahc, MSG_OUT, MSG_NOOP);
4660 * Setup the allowed SCSI Sequences based on operational mode.
4661 * If we are a target, we'll enalbe select in operations once
4662 * we've had a lun enabled.
4664 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
4665 if ((ahc->flags & AHC_INITIATORROLE) != 0)
4666 scsiseq_template |= ENRSELI;
4667 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
4669 /* Initialize our list of free SCBs. */
4670 ahc_build_free_scb_list(ahc);
4673 * Tell the sequencer which SCB will be the next one it receives.
4675 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
4678 * Load the Sequencer program and Enable the adapter
4679 * in "fast" mode.
4681 if (bootverbose)
4682 printf("%s: Downloading Sequencer Program...",
4683 ahc_name(ahc));
4685 error = ahc_loadseq(ahc);
4686 if (error != 0)
4687 return (error);
4689 if ((ahc->features & AHC_ULTRA2) != 0) {
4690 int wait;
4693 * Wait for up to 500ms for our transceivers
4694 * to settle. If the adapter does not have
4695 * a cable attached, the transceivers may
4696 * never settle, so don't complain if we
4697 * fail here.
4699 for (wait = 5000;
4700 (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
4701 wait--)
4702 ahc_delay(100);
4704 ahc_restart(ahc);
4705 return (0);
4709 * Start the board, ready for normal operation
4712 ahc_init(struct ahc_softc *ahc)
4714 int max_targ;
4715 u_int i;
4716 u_int scsi_conf;
4717 u_int ultraenb;
4718 u_int discenable;
4719 u_int tagenable;
4720 size_t driver_data_size;
4722 #ifdef AHC_DEBUG
4723 if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
4724 ahc->flags |= AHC_SEQUENCER_DEBUG;
4725 #endif
4727 #ifdef AHC_PRINT_SRAM
4728 printf("Scratch Ram:");
4729 for (i = 0x20; i < 0x5f; i++) {
4730 if (((i % 8) == 0) && (i != 0)) {
4731 printf ("\n ");
4733 printf (" 0x%x", ahc_inb(ahc, i));
4735 if ((ahc->features & AHC_MORE_SRAM) != 0) {
4736 for (i = 0x70; i < 0x7f; i++) {
4737 if (((i % 8) == 0) && (i != 0)) {
4738 printf ("\n ");
4740 printf (" 0x%x", ahc_inb(ahc, i));
4743 printf ("\n");
4745 * Reading uninitialized scratch ram may
4746 * generate parity errors.
4748 ahc_outb(ahc, CLRINT, CLRPARERR);
4749 ahc_outb(ahc, CLRINT, CLRBRKADRINT);
4750 #endif
4751 max_targ = 15;
4754 * Assume we have a board at this stage and it has been reset.
4756 if ((ahc->flags & AHC_USEDEFAULTS) != 0)
4757 ahc->our_id = ahc->our_id_b = 7;
4760 * Default to allowing initiator operations.
4762 ahc->flags |= AHC_INITIATORROLE;
4765 * Only allow target mode features if this unit has them enabled.
4767 if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
4768 ahc->features &= ~AHC_TARGETMODE;
4770 #ifndef __linux__
4771 /* DMA tag for mapping buffers into device visible space. */
4772 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4773 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4774 /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
4775 ? (dma_addr_t)0x7FFFFFFFFFULL
4776 : BUS_SPACE_MAXADDR_32BIT,
4777 /*highaddr*/BUS_SPACE_MAXADDR,
4778 /*filter*/NULL, /*filterarg*/NULL,
4779 /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
4780 /*nsegments*/AHC_NSEG,
4781 /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
4782 /*flags*/BUS_DMA_ALLOCNOW,
4783 &ahc->buffer_dmat) != 0) {
4784 return (ENOMEM);
4786 #endif
4788 ahc->init_level++;
4791 * DMA tag for our command fifos and other data in system memory
4792 * the card's sequencer must be able to access. For initiator
4793 * roles, we need to allocate space for the qinfifo and qoutfifo.
4794 * The qinfifo and qoutfifo are composed of 256 1 byte elements.
4795 * When providing for the target mode role, we must additionally
4796 * provide space for the incoming target command fifo and an extra
4797 * byte to deal with a dma bug in some chip versions.
4799 driver_data_size = 2 * 256 * sizeof(uint8_t);
4800 if ((ahc->features & AHC_TARGETMODE) != 0)
4801 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
4802 + /*DMA WideOdd Bug Buffer*/1;
4803 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
4804 /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
4805 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
4806 /*highaddr*/BUS_SPACE_MAXADDR,
4807 /*filter*/NULL, /*filterarg*/NULL,
4808 driver_data_size,
4809 /*nsegments*/1,
4810 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
4811 /*flags*/0, &ahc->shared_data_dmat) != 0) {
4812 return (ENOMEM);
4815 ahc->init_level++;
4817 /* Allocation of driver data */
4818 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
4819 (void **)&ahc->qoutfifo,
4820 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
4821 return (ENOMEM);
4824 ahc->init_level++;
4826 /* And permanently map it in */
4827 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
4828 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
4829 &ahc->shared_data_busaddr, /*flags*/0);
4831 if ((ahc->features & AHC_TARGETMODE) != 0) {
4832 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
4833 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
4834 ahc->dma_bug_buf = ahc->shared_data_busaddr
4835 + driver_data_size - 1;
4836 /* All target command blocks start out invalid. */
4837 for (i = 0; i < AHC_TMODE_CMDS; i++)
4838 ahc->targetcmds[i].cmd_valid = 0;
4839 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
4840 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
4842 ahc->qinfifo = &ahc->qoutfifo[256];
4844 ahc->init_level++;
4846 /* Allocate SCB data now that buffer_dmat is initialized */
4847 if (ahc->scb_data->maxhscbs == 0)
4848 if (ahc_init_scbdata(ahc) != 0)
4849 return (ENOMEM);
4852 * Allocate a tstate to house information for our
4853 * initiator presence on the bus as well as the user
4854 * data for any target mode initiator.
4856 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
4857 printf("%s: unable to allocate ahc_tmode_tstate. "
4858 "Failing attach\n", ahc_name(ahc));
4859 return (ENOMEM);
4862 if ((ahc->features & AHC_TWIN) != 0) {
4863 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
4864 printf("%s: unable to allocate ahc_tmode_tstate. "
4865 "Failing attach\n", ahc_name(ahc));
4866 return (ENOMEM);
4870 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
4871 ahc->flags |= AHC_PAGESCBS;
4872 } else {
4873 ahc->flags &= ~AHC_PAGESCBS;
4876 #ifdef AHC_DEBUG
4877 if (ahc_debug & AHC_SHOW_MISC) {
4878 printf("%s: hardware scb %u bytes; kernel scb %u bytes; "
4879 "ahc_dma %u bytes\n",
4880 ahc_name(ahc),
4881 (u_int)sizeof(struct hardware_scb),
4882 (u_int)sizeof(struct scb),
4883 (u_int)sizeof(struct ahc_dma_seg));
4885 #endif /* AHC_DEBUG */
4888 * Look at the information that board initialization or
4889 * the board bios has left us.
4891 if (ahc->features & AHC_TWIN) {
4892 scsi_conf = ahc_inb(ahc, SCSICONF + 1);
4893 if ((scsi_conf & RESET_SCSI) != 0
4894 && (ahc->flags & AHC_INITIATORROLE) != 0)
4895 ahc->flags |= AHC_RESET_BUS_B;
4898 scsi_conf = ahc_inb(ahc, SCSICONF);
4899 if ((scsi_conf & RESET_SCSI) != 0
4900 && (ahc->flags & AHC_INITIATORROLE) != 0)
4901 ahc->flags |= AHC_RESET_BUS_A;
4903 ultraenb = 0;
4904 tagenable = ALL_TARGETS_MASK;
4906 /* Grab the disconnection disable table and invert it for our needs */
4907 if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
4908 printf("%s: Host Adapter Bios disabled. Using default SCSI "
4909 "device parameters\n", ahc_name(ahc));
4910 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
4911 AHC_TERM_ENB_A|AHC_TERM_ENB_B;
4912 discenable = ALL_TARGETS_MASK;
4913 if ((ahc->features & AHC_ULTRA) != 0)
4914 ultraenb = ALL_TARGETS_MASK;
4915 } else {
4916 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
4917 | ahc_inb(ahc, DISC_DSB));
4918 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
4919 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
4920 | ahc_inb(ahc, ULTRA_ENB);
4923 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
4924 max_targ = 7;
4926 for (i = 0; i <= max_targ; i++) {
4927 struct ahc_initiator_tinfo *tinfo;
4928 struct ahc_tmode_tstate *tstate;
4929 u_int our_id;
4930 u_int target_id;
4931 char channel;
4933 channel = 'A';
4934 our_id = ahc->our_id;
4935 target_id = i;
4936 if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
4937 channel = 'B';
4938 our_id = ahc->our_id_b;
4939 target_id = i % 8;
4941 tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
4942 target_id, &tstate);
4943 /* Default to async narrow across the board */
4944 memset(tinfo, 0, sizeof(*tinfo));
4945 if (ahc->flags & AHC_USEDEFAULTS) {
4946 if ((ahc->features & AHC_WIDE) != 0)
4947 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
4950 * These will be truncated when we determine the
4951 * connection type we have with the target.
4953 tinfo->user.period = ahc_syncrates->period;
4954 tinfo->user.offset = MAX_OFFSET;
4955 } else {
4956 u_int scsirate;
4957 uint16_t mask;
4959 /* Take the settings leftover in scratch RAM. */
4960 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
4961 mask = (0x01 << i);
4962 if ((ahc->features & AHC_ULTRA2) != 0) {
4963 u_int offset;
4964 u_int maxsync;
4966 if ((scsirate & SOFS) == 0x0F) {
4968 * Haven't negotiated yet,
4969 * so the format is different.
4971 scsirate = (scsirate & SXFR) >> 4
4972 | (ultraenb & mask)
4973 ? 0x08 : 0x0
4974 | (scsirate & WIDEXFER);
4975 offset = MAX_OFFSET_ULTRA2;
4976 } else
4977 offset = ahc_inb(ahc, TARG_OFFSET + i);
4978 if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
4979 /* Set to the lowest sync rate, 5MHz */
4980 scsirate |= 0x1c;
4981 maxsync = AHC_SYNCRATE_ULTRA2;
4982 if ((ahc->features & AHC_DT) != 0)
4983 maxsync = AHC_SYNCRATE_DT;
4984 tinfo->user.period =
4985 ahc_find_period(ahc, scsirate, maxsync);
4986 if (offset == 0)
4987 tinfo->user.period = 0;
4988 else
4989 tinfo->user.offset = MAX_OFFSET;
4990 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
4991 && (ahc->features & AHC_DT) != 0)
4992 tinfo->user.ppr_options =
4993 MSG_EXT_PPR_DT_REQ;
4994 } else if ((scsirate & SOFS) != 0) {
4995 if ((scsirate & SXFR) == 0x40
4996 && (ultraenb & mask) != 0) {
4997 /* Treat 10MHz as a non-ultra speed */
4998 scsirate &= ~SXFR;
4999 ultraenb &= ~mask;
5001 tinfo->user.period =
5002 ahc_find_period(ahc, scsirate,
5003 (ultraenb & mask)
5004 ? AHC_SYNCRATE_ULTRA
5005 : AHC_SYNCRATE_FAST);
5006 if (tinfo->user.period != 0)
5007 tinfo->user.offset = MAX_OFFSET;
5009 if (tinfo->user.period == 0)
5010 tinfo->user.offset = 0;
5011 if ((scsirate & WIDEXFER) != 0
5012 && (ahc->features & AHC_WIDE) != 0)
5013 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
5014 tinfo->user.protocol_version = 4;
5015 if ((ahc->features & AHC_DT) != 0)
5016 tinfo->user.transport_version = 3;
5017 else
5018 tinfo->user.transport_version = 2;
5019 tinfo->goal.protocol_version = 2;
5020 tinfo->goal.transport_version = 2;
5021 tinfo->curr.protocol_version = 2;
5022 tinfo->curr.transport_version = 2;
5024 tstate->ultraenb = 0;
5026 ahc->user_discenable = discenable;
5027 ahc->user_tagenable = tagenable;
5029 return (ahc->bus_chip_init(ahc));
5032 void
5033 ahc_intr_enable(struct ahc_softc *ahc, int enable)
5035 u_int hcntrl;
5037 hcntrl = ahc_inb(ahc, HCNTRL);
5038 hcntrl &= ~INTEN;
5039 ahc->pause &= ~INTEN;
5040 ahc->unpause &= ~INTEN;
5041 if (enable) {
5042 hcntrl |= INTEN;
5043 ahc->pause |= INTEN;
5044 ahc->unpause |= INTEN;
5046 ahc_outb(ahc, HCNTRL, hcntrl);
5050 * Ensure that the card is paused in a location
5051 * outside of all critical sections and that all
5052 * pending work is completed prior to returning.
5053 * This routine should only be called from outside
5054 * an interrupt context.
5056 void
5057 ahc_pause_and_flushwork(struct ahc_softc *ahc)
5059 int intstat;
5060 int maxloops;
5061 int paused;
5063 maxloops = 1000;
5064 ahc->flags |= AHC_ALL_INTERRUPTS;
5065 paused = FALSE;
5066 do {
5067 if (paused) {
5068 ahc_unpause(ahc);
5070 * Give the sequencer some time to service
5071 * any active selections.
5073 ahc_delay(500);
5075 ahc_intr(ahc);
5076 ahc_pause(ahc);
5077 paused = TRUE;
5078 ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
5079 intstat = ahc_inb(ahc, INTSTAT);
5080 if ((intstat & INT_PEND) == 0) {
5081 ahc_clear_critical_section(ahc);
5082 intstat = ahc_inb(ahc, INTSTAT);
5084 } while (--maxloops
5085 && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
5086 && ((intstat & INT_PEND) != 0
5087 || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
5088 if (maxloops == 0) {
5089 printf("Infinite interrupt loop, INTSTAT = %x",
5090 ahc_inb(ahc, INTSTAT));
5092 ahc_platform_flushwork(ahc);
5093 ahc->flags &= ~AHC_ALL_INTERRUPTS;
5096 #ifdef CONFIG_PM
5098 ahc_suspend(struct ahc_softc *ahc)
5101 ahc_pause_and_flushwork(ahc);
5103 if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
5104 ahc_unpause(ahc);
5105 return (EBUSY);
5108 #ifdef AHC_TARGET_MODE
5110 * XXX What about ATIOs that have not yet been serviced?
5111 * Perhaps we should just refuse to be suspended if we
5112 * are acting in a target role.
5114 if (ahc->pending_device != NULL) {
5115 ahc_unpause(ahc);
5116 return (EBUSY);
5118 #endif
5119 ahc_shutdown(ahc);
5120 return (0);
5124 ahc_resume(struct ahc_softc *ahc)
5127 ahc_reset(ahc, /*reinit*/TRUE);
5128 ahc_intr_enable(ahc, TRUE);
5129 ahc_restart(ahc);
5130 return (0);
5132 #endif
5133 /************************** Busy Target Table *********************************/
5135 * Return the untagged transaction id for a given target/channel lun.
5136 * Optionally, clear the entry.
5138 u_int
5139 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
5141 u_int scbid;
5142 u_int target_offset;
5144 if ((ahc->flags & AHC_SCB_BTT) != 0) {
5145 u_int saved_scbptr;
5147 saved_scbptr = ahc_inb(ahc, SCBPTR);
5148 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5149 scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
5150 ahc_outb(ahc, SCBPTR, saved_scbptr);
5151 } else {
5152 target_offset = TCL_TARGET_OFFSET(tcl);
5153 scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
5156 return (scbid);
5159 void
5160 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
5162 u_int target_offset;
5164 if ((ahc->flags & AHC_SCB_BTT) != 0) {
5165 u_int saved_scbptr;
5167 saved_scbptr = ahc_inb(ahc, SCBPTR);
5168 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5169 ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
5170 ahc_outb(ahc, SCBPTR, saved_scbptr);
5171 } else {
5172 target_offset = TCL_TARGET_OFFSET(tcl);
5173 ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
5177 void
5178 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
5180 u_int target_offset;
5182 if ((ahc->flags & AHC_SCB_BTT) != 0) {
5183 u_int saved_scbptr;
5185 saved_scbptr = ahc_inb(ahc, SCBPTR);
5186 ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
5187 ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
5188 ahc_outb(ahc, SCBPTR, saved_scbptr);
5189 } else {
5190 target_offset = TCL_TARGET_OFFSET(tcl);
5191 ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
5195 /************************** SCB and SCB queue management **********************/
5197 ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
5198 char channel, int lun, u_int tag, role_t role)
5200 int targ = SCB_GET_TARGET(ahc, scb);
5201 char chan = SCB_GET_CHANNEL(ahc, scb);
5202 int slun = SCB_GET_LUN(scb);
5203 int match;
5205 match = ((chan == channel) || (channel == ALL_CHANNELS));
5206 if (match != 0)
5207 match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
5208 if (match != 0)
5209 match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
5210 if (match != 0) {
5211 #ifdef AHC_TARGET_MODE
5212 int group;
5214 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
5215 if (role == ROLE_INITIATOR) {
5216 match = (group != XPT_FC_GROUP_TMODE)
5217 && ((tag == scb->hscb->tag)
5218 || (tag == SCB_LIST_NULL));
5219 } else if (role == ROLE_TARGET) {
5220 match = (group == XPT_FC_GROUP_TMODE)
5221 && ((tag == scb->io_ctx->csio.tag_id)
5222 || (tag == SCB_LIST_NULL));
5224 #else /* !AHC_TARGET_MODE */
5225 match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
5226 #endif /* AHC_TARGET_MODE */
5229 return match;
5232 void
5233 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
5235 int target;
5236 char channel;
5237 int lun;
5239 target = SCB_GET_TARGET(ahc, scb);
5240 lun = SCB_GET_LUN(scb);
5241 channel = SCB_GET_CHANNEL(ahc, scb);
5243 ahc_search_qinfifo(ahc, target, channel, lun,
5244 /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
5245 CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5247 ahc_platform_freeze_devq(ahc, scb);
5250 void
5251 ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
5253 struct scb *prev_scb;
5255 prev_scb = NULL;
5256 if (ahc_qinfifo_count(ahc) != 0) {
5257 u_int prev_tag;
5258 uint8_t prev_pos;
5260 prev_pos = ahc->qinfifonext - 1;
5261 prev_tag = ahc->qinfifo[prev_pos];
5262 prev_scb = ahc_lookup_scb(ahc, prev_tag);
5264 ahc_qinfifo_requeue(ahc, prev_scb, scb);
5265 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5266 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5267 } else {
5268 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5272 static void
5273 ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
5274 struct scb *scb)
5276 if (prev_scb == NULL) {
5277 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5278 } else {
5279 prev_scb->hscb->next = scb->hscb->tag;
5280 ahc_sync_scb(ahc, prev_scb,
5281 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5283 ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
5284 scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5285 ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5288 static int
5289 ahc_qinfifo_count(struct ahc_softc *ahc)
5291 uint8_t qinpos;
5292 uint8_t diff;
5294 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5295 qinpos = ahc_inb(ahc, SNSCB_QOFF);
5296 ahc_outb(ahc, SNSCB_QOFF, qinpos);
5297 } else
5298 qinpos = ahc_inb(ahc, QINPOS);
5299 diff = ahc->qinfifonext - qinpos;
5300 return (diff);
5304 ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
5305 int lun, u_int tag, role_t role, uint32_t status,
5306 ahc_search_action action)
5308 struct scb *scb;
5309 struct scb *prev_scb;
5310 uint8_t qinstart;
5311 uint8_t qinpos;
5312 uint8_t qintail;
5313 uint8_t next;
5314 uint8_t prev;
5315 uint8_t curscbptr;
5316 int found;
5317 int have_qregs;
5319 qintail = ahc->qinfifonext;
5320 have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
5321 if (have_qregs) {
5322 qinstart = ahc_inb(ahc, SNSCB_QOFF);
5323 ahc_outb(ahc, SNSCB_QOFF, qinstart);
5324 } else
5325 qinstart = ahc_inb(ahc, QINPOS);
5326 qinpos = qinstart;
5327 found = 0;
5328 prev_scb = NULL;
5330 if (action == SEARCH_COMPLETE) {
5332 * Don't attempt to run any queued untagged transactions
5333 * until we are done with the abort process.
5335 ahc_freeze_untagged_queues(ahc);
5339 * Start with an empty queue. Entries that are not chosen
5340 * for removal will be re-added to the queue as we go.
5342 ahc->qinfifonext = qinpos;
5343 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
5345 while (qinpos != qintail) {
5346 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
5347 if (scb == NULL) {
5348 printf("qinpos = %d, SCB index = %d\n",
5349 qinpos, ahc->qinfifo[qinpos]);
5350 panic("Loop 1\n");
5353 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
5355 * We found an scb that needs to be acted on.
5357 found++;
5358 switch (action) {
5359 case SEARCH_COMPLETE:
5361 cam_status ostat;
5362 cam_status cstat;
5364 ostat = ahc_get_transaction_status(scb);
5365 if (ostat == CAM_REQ_INPROG)
5366 ahc_set_transaction_status(scb, status);
5367 cstat = ahc_get_transaction_status(scb);
5368 if (cstat != CAM_REQ_CMP)
5369 ahc_freeze_scb(scb);
5370 if ((scb->flags & SCB_ACTIVE) == 0)
5371 printf("Inactive SCB in qinfifo\n");
5372 ahc_done(ahc, scb);
5374 /* FALLTHROUGH */
5376 case SEARCH_REMOVE:
5377 break;
5378 case SEARCH_COUNT:
5379 ahc_qinfifo_requeue(ahc, prev_scb, scb);
5380 prev_scb = scb;
5381 break;
5383 } else {
5384 ahc_qinfifo_requeue(ahc, prev_scb, scb);
5385 prev_scb = scb;
5387 qinpos++;
5390 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
5391 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
5392 } else {
5393 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
5396 if (action != SEARCH_COUNT
5397 && (found != 0)
5398 && (qinstart != ahc->qinfifonext)) {
5400 * The sequencer may be in the process of dmaing
5401 * down the SCB at the beginning of the queue.
5402 * This could be problematic if either the first,
5403 * or the second SCB is removed from the queue
5404 * (the first SCB includes a pointer to the "next"
5405 * SCB to dma). If we have removed any entries, swap
5406 * the first element in the queue with the next HSCB
5407 * so the sequencer will notice that NEXT_QUEUED_SCB
5408 * has changed during its dma attempt and will retry
5409 * the DMA.
5411 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
5413 if (scb == NULL) {
5414 printf("found = %d, qinstart = %d, qinfifionext = %d\n",
5415 found, qinstart, ahc->qinfifonext);
5416 panic("First/Second Qinfifo fixup\n");
5419 * ahc_swap_with_next_hscb forces our next pointer to
5420 * point to the reserved SCB for future commands. Save
5421 * and restore our original next pointer to maintain
5422 * queue integrity.
5424 next = scb->hscb->next;
5425 ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
5426 ahc_swap_with_next_hscb(ahc, scb);
5427 scb->hscb->next = next;
5428 ahc->qinfifo[qinstart] = scb->hscb->tag;
5430 /* Tell the card about the new head of the qinfifo. */
5431 ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
5433 /* Fixup the tail "next" pointer. */
5434 qintail = ahc->qinfifonext - 1;
5435 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
5436 scb->hscb->next = ahc->next_queued_scb->hscb->tag;
5440 * Search waiting for selection list.
5442 curscbptr = ahc_inb(ahc, SCBPTR);
5443 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */
5444 prev = SCB_LIST_NULL;
5446 while (next != SCB_LIST_NULL) {
5447 uint8_t scb_index;
5449 ahc_outb(ahc, SCBPTR, next);
5450 scb_index = ahc_inb(ahc, SCB_TAG);
5451 if (scb_index >= ahc->scb_data->numscbs) {
5452 printf("Waiting List inconsistency. "
5453 "SCB index == %d, yet numscbs == %d.",
5454 scb_index, ahc->scb_data->numscbs);
5455 ahc_dump_card_state(ahc);
5456 panic("for safety");
5458 scb = ahc_lookup_scb(ahc, scb_index);
5459 if (scb == NULL) {
5460 printf("scb_index = %d, next = %d\n",
5461 scb_index, next);
5462 panic("Waiting List traversal\n");
5464 if (ahc_match_scb(ahc, scb, target, channel,
5465 lun, SCB_LIST_NULL, role)) {
5467 * We found an scb that needs to be acted on.
5469 found++;
5470 switch (action) {
5471 case SEARCH_COMPLETE:
5473 cam_status ostat;
5474 cam_status cstat;
5476 ostat = ahc_get_transaction_status(scb);
5477 if (ostat == CAM_REQ_INPROG)
5478 ahc_set_transaction_status(scb,
5479 status);
5480 cstat = ahc_get_transaction_status(scb);
5481 if (cstat != CAM_REQ_CMP)
5482 ahc_freeze_scb(scb);
5483 if ((scb->flags & SCB_ACTIVE) == 0)
5484 printf("Inactive SCB in Waiting List\n");
5485 ahc_done(ahc, scb);
5486 /* FALLTHROUGH */
5488 case SEARCH_REMOVE:
5489 next = ahc_rem_wscb(ahc, next, prev);
5490 break;
5491 case SEARCH_COUNT:
5492 prev = next;
5493 next = ahc_inb(ahc, SCB_NEXT);
5494 break;
5496 } else {
5498 prev = next;
5499 next = ahc_inb(ahc, SCB_NEXT);
5502 ahc_outb(ahc, SCBPTR, curscbptr);
5504 found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
5505 channel, lun, status, action);
5507 if (action == SEARCH_COMPLETE)
5508 ahc_release_untagged_queues(ahc);
5509 return (found);
5513 ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
5514 int target, char channel, int lun, uint32_t status,
5515 ahc_search_action action)
5517 struct scb *scb;
5518 int maxtarget;
5519 int found;
5520 int i;
5522 if (action == SEARCH_COMPLETE) {
5524 * Don't attempt to run any queued untagged transactions
5525 * until we are done with the abort process.
5527 ahc_freeze_untagged_queues(ahc);
5530 found = 0;
5531 i = 0;
5532 if ((ahc->flags & AHC_SCB_BTT) == 0) {
5534 maxtarget = 16;
5535 if (target != CAM_TARGET_WILDCARD) {
5537 i = target;
5538 if (channel == 'B')
5539 i += 8;
5540 maxtarget = i + 1;
5542 } else {
5543 maxtarget = 0;
5546 for (; i < maxtarget; i++) {
5547 struct scb_tailq *untagged_q;
5548 struct scb *next_scb;
5550 untagged_q = &(ahc->untagged_queues[i]);
5551 next_scb = TAILQ_FIRST(untagged_q);
5552 while (next_scb != NULL) {
5554 scb = next_scb;
5555 next_scb = TAILQ_NEXT(scb, links.tqe);
5558 * The head of the list may be the currently
5559 * active untagged command for a device.
5560 * We're only searching for commands that
5561 * have not been started. A transaction
5562 * marked active but still in the qinfifo
5563 * is removed by the qinfifo scanning code
5564 * above.
5566 if ((scb->flags & SCB_ACTIVE) != 0)
5567 continue;
5569 if (ahc_match_scb(ahc, scb, target, channel, lun,
5570 SCB_LIST_NULL, ROLE_INITIATOR) == 0
5571 || (ctx != NULL && ctx != scb->io_ctx))
5572 continue;
5575 * We found an scb that needs to be acted on.
5577 found++;
5578 switch (action) {
5579 case SEARCH_COMPLETE:
5581 cam_status ostat;
5582 cam_status cstat;
5584 ostat = ahc_get_transaction_status(scb);
5585 if (ostat == CAM_REQ_INPROG)
5586 ahc_set_transaction_status(scb, status);
5587 cstat = ahc_get_transaction_status(scb);
5588 if (cstat != CAM_REQ_CMP)
5589 ahc_freeze_scb(scb);
5590 if ((scb->flags & SCB_ACTIVE) == 0)
5591 printf("Inactive SCB in untaggedQ\n");
5592 ahc_done(ahc, scb);
5593 break;
5595 case SEARCH_REMOVE:
5596 scb->flags &= ~SCB_UNTAGGEDQ;
5597 TAILQ_REMOVE(untagged_q, scb, links.tqe);
5598 break;
5599 case SEARCH_COUNT:
5600 break;
5605 if (action == SEARCH_COMPLETE)
5606 ahc_release_untagged_queues(ahc);
5607 return (found);
5611 ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
5612 int lun, u_int tag, int stop_on_first, int remove,
5613 int save_state)
5615 struct scb *scbp;
5616 u_int next;
5617 u_int prev;
5618 u_int count;
5619 u_int active_scb;
5621 count = 0;
5622 next = ahc_inb(ahc, DISCONNECTED_SCBH);
5623 prev = SCB_LIST_NULL;
5625 if (save_state) {
5626 /* restore this when we're done */
5627 active_scb = ahc_inb(ahc, SCBPTR);
5628 } else
5629 /* Silence compiler */
5630 active_scb = SCB_LIST_NULL;
5632 while (next != SCB_LIST_NULL) {
5633 u_int scb_index;
5635 ahc_outb(ahc, SCBPTR, next);
5636 scb_index = ahc_inb(ahc, SCB_TAG);
5637 if (scb_index >= ahc->scb_data->numscbs) {
5638 printf("Disconnected List inconsistency. "
5639 "SCB index == %d, yet numscbs == %d.",
5640 scb_index, ahc->scb_data->numscbs);
5641 ahc_dump_card_state(ahc);
5642 panic("for safety");
5645 if (next == prev) {
5646 panic("Disconnected List Loop. "
5647 "cur SCBPTR == %x, prev SCBPTR == %x.",
5648 next, prev);
5650 scbp = ahc_lookup_scb(ahc, scb_index);
5651 if (ahc_match_scb(ahc, scbp, target, channel, lun,
5652 tag, ROLE_INITIATOR)) {
5653 count++;
5654 if (remove) {
5655 next =
5656 ahc_rem_scb_from_disc_list(ahc, prev, next);
5657 } else {
5658 prev = next;
5659 next = ahc_inb(ahc, SCB_NEXT);
5661 if (stop_on_first)
5662 break;
5663 } else {
5664 prev = next;
5665 next = ahc_inb(ahc, SCB_NEXT);
5668 if (save_state)
5669 ahc_outb(ahc, SCBPTR, active_scb);
5670 return (count);
5674 * Remove an SCB from the on chip list of disconnected transactions.
5675 * This is empty/unused if we are not performing SCB paging.
5677 static u_int
5678 ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
5680 u_int next;
5682 ahc_outb(ahc, SCBPTR, scbptr);
5683 next = ahc_inb(ahc, SCB_NEXT);
5685 ahc_outb(ahc, SCB_CONTROL, 0);
5687 ahc_add_curscb_to_free_list(ahc);
5689 if (prev != SCB_LIST_NULL) {
5690 ahc_outb(ahc, SCBPTR, prev);
5691 ahc_outb(ahc, SCB_NEXT, next);
5692 } else
5693 ahc_outb(ahc, DISCONNECTED_SCBH, next);
5695 return (next);
5699 * Add the SCB as selected by SCBPTR onto the on chip list of
5700 * free hardware SCBs. This list is empty/unused if we are not
5701 * performing SCB paging.
5703 static void
5704 ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
5707 * Invalidate the tag so that our abort
5708 * routines don't think it's active.
5710 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
5712 if ((ahc->flags & AHC_PAGESCBS) != 0) {
5713 ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
5714 ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
5719 * Manipulate the waiting for selection list and return the
5720 * scb that follows the one that we remove.
/*
 * Remove the SCB at 'scbpos' from the waiting-for-selection list and
 * return the tag of the SCB that followed it.  'prev' is the tag of
 * the predecessor, or SCB_LIST_NULL when removing the list head.
 * SCBPTR is restored to its value on entry before returning.
 */
5722 static u_int
5723 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
5725 u_int curscb, next;
5728 * Select the SCB we want to abort and
5729 * pull the next pointer out of it.
5731 curscb = ahc_inb(ahc, SCBPTR);
5732 ahc_outb(ahc, SCBPTR, scbpos);
5733 next = ahc_inb(ahc, SCB_NEXT);
5735 /* Clear the necessary fields */
5736 ahc_outb(ahc, SCB_CONTROL, 0);
5738 ahc_add_curscb_to_free_list(ahc);
5740 /* update the waiting list */
5741 if (prev == SCB_LIST_NULL) {
5742 /* First in the list */
5743 ahc_outb(ahc, WAITING_SCBH, next);
5746 * Ensure we aren't attempting to perform
5747 * selection for this entry.
5749 ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
5750 } else {
5752 * Select the scb that pointed to us
5753 * and update its next pointer.
5755 ahc_outb(ahc, SCBPTR, prev);
5756 ahc_outb(ahc, SCB_NEXT, next);
5760 * Point us back at the original scb position.
5762 ahc_outb(ahc, SCBPTR, curscb);
5763 return next;
5766 /******************************** Error Handling ******************************/
5768 * Abort all SCBs that match the given description (target/channel/lun/tag),
5769 * setting their status to the passed in status if the status has not already
5770 * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
5771 * is paused before it is called.
/*
 * Abort every SCB matching target/channel/lun/tag/role, setting each
 * transaction's status to 'status' unless it was already modified from
 * CAM_REQ_INPROG.  Scrubs, in order: the qinfifo, the untagged busy-
 * target table, the on-chip disconnected list, the raw hardware SCB
 * array, and finally the kernel pending list (completing matches via
 * ahc_done()).  Returns the number of SCBs found/aborted.  The caller
 * must have paused the sequencer before calling.
 */
5774 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
5775 int lun, u_int tag, role_t role, uint32_t status)
5777 struct scb *scbp;
5778 struct scb *scbp_next;
5779 u_int active_scb;
5780 int i, j;
5781 int maxtarget;
5782 int minlun;
5783 int maxlun;
5785 int found;
5788 * Don't attempt to run any queued untagged transactions
5789 * until we are done with the abort process.
5791 ahc_freeze_untagged_queues(ahc);
5793 /* restore this when we're done */
5794 active_scb = ahc_inb(ahc, SCBPTR);
/* First flush matching entries still waiting in the input FIFO. */
5796 found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
5797 role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
5800 * Clean out the busy target table for any untagged commands.
5802 i = 0;
5803 maxtarget = 16;
/* Narrow the scan to one target slot; channel B IDs live at +8. */
5804 if (target != CAM_TARGET_WILDCARD) {
5805 i = target;
5806 if (channel == 'B')
5807 i += 8;
5808 maxtarget = i + 1;
5811 if (lun == CAM_LUN_WILDCARD) {
5814 * Unless we are using an SCB based
5815 * busy targets table, there is only
5816 * one table entry for all luns of
5817 * a target.
5819 minlun = 0;
5820 maxlun = 1;
5821 if ((ahc->flags & AHC_SCB_BTT) != 0)
5822 maxlun = AHC_NUM_LUNS;
5823 } else {
5824 minlun = lun;
5825 maxlun = lun + 1;
5828 if (role != ROLE_TARGET) {
5829 for (;i < maxtarget; i++) {
5830 for (j = minlun;j < maxlun; j++) {
5831 u_int scbid;
5832 u_int tcl;
5834 tcl = BUILD_TCL(i << 4, j);
5835 scbid = ahc_index_busy_tcl(ahc, tcl);
5836 scbp = ahc_lookup_scb(ahc, scbid);
5837 if (scbp == NULL
5838 || ahc_match_scb(ahc, scbp, target, channel,
5839 lun, tag, role) == 0)
5840 continue;
5841 ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
5846 * Go through the disconnected list and remove any entries we
5847 * have queued for completion, 0'ing their control byte too.
5848 * We save the active SCB and restore it ourselves, so there
5849 * is no reason for this search to restore it too.
5851 ahc_search_disc_list(ahc, target, channel, lun, tag,
5852 /*stop_on_first*/FALSE, /*remove*/TRUE,
5853 /*save_state*/FALSE);
5857 * Go through the hardware SCB array looking for commands that
5858 * were active but not on any list. In some cases, these remnants
5859 * might not still have mappings in the scbindex array (e.g. unexpected
5860 * bus free with the same scb queued for an abort). Don't hold this
5861 * against them.
5863 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
5864 u_int scbid;
5866 ahc_outb(ahc, SCBPTR, i);
5867 scbid = ahc_inb(ahc, SCB_TAG);
5868 scbp = ahc_lookup_scb(ahc, scbid);
5869 if ((scbp == NULL && scbid != SCB_LIST_NULL)
5870 || (scbp != NULL
5871 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
5872 ahc_add_curscb_to_free_list(ahc);
5876 * Go through the pending CCB list and look for
5877 * commands for this target that are still active.
5878 * These are other tagged commands that were
5879 * disconnected when the reset occurred.
5881 scbp_next = LIST_FIRST(&ahc->pending_scbs);
5882 while (scbp_next != NULL) {
5883 scbp = scbp_next;
/* Grab the next link first: ahc_done() unlinks scbp. */
5884 scbp_next = LIST_NEXT(scbp, pending_links);
5885 if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
5886 cam_status ostat;
5888 ostat = ahc_get_transaction_status(scbp);
5889 if (ostat == CAM_REQ_INPROG)
5890 ahc_set_transaction_status(scbp, status);
5891 if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
5892 ahc_freeze_scb(scbp);
5893 if ((scbp->flags & SCB_ACTIVE) == 0)
5894 printf("Inactive SCB on pending list\n");
5895 ahc_done(ahc, scbp);
5896 found++;
/* Restore the SCB selection we found on entry. */
5899 ahc_outb(ahc, SCBPTR, active_scb);
5900 ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
5901 ahc_release_untagged_queues(ahc);
5902 return found;
/*
 * Assert SCSI bus reset (SCSIRSTO) on the currently selected bus,
 * hold it for AHC_BUSRESET_DELAY, then deassert it.  Reset interrupts
 * (ENSCSIRST) are masked around the pulse so we don't take an
 * interrupt for our own reset, and re-enabled afterwards.
 */
5905 static void
5906 ahc_reset_current_bus(struct ahc_softc *ahc)
5908 uint8_t scsiseq;
/* Mask the reset interrupt before we raise SCSIRSTO ourselves. */
5910 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
5911 scsiseq = ahc_inb(ahc, SCSISEQ);
5912 ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
/* Post the write to the chip before timing the reset pulse. */
5913 ahc_flush_device_writes(ahc);
5914 ahc_delay(AHC_BUSRESET_DELAY);
5915 /* Turn off the bus reset */
5916 ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
/* Discard interrupt status raised by the reset pulse. */
5918 ahc_clear_intstat(ahc);
5920 /* Re-enable reset interrupts */
5921 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
/*
 * Reset the named SCSI channel ('A' or 'B'), optionally driving the
 * physical bus reset (initiate_reset).  Drains completion FIFOs,
 * pulses the reset on the correct bus (switching SBLKCTL banks for a
 * Twin-channel chip if necessary), aborts all pending SCBs for the
 * channel with CAM_SCSI_BUS_RESET, notifies target-mode luns and the
 * XPT, and reverts all transfer negotiations to async/narrow.
 * Returns the number of SCBs aborted.  Leaves the sequencer either
 * restarted or unpaused depending on which bus was reset.
 */
5925 ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
5927 struct ahc_devinfo devinfo;
5928 u_int initiator, target, max_scsiid;
5929 u_int sblkctl;
5930 u_int scsiseq;
5931 u_int simode1;
5932 int found;
5933 int restart_needed;
5934 char cur_channel;
5936 ahc->pending_device = NULL;
5938 ahc_compile_devinfo(&devinfo,
5939 CAM_TARGET_WILDCARD,
5940 CAM_TARGET_WILDCARD,
5941 CAM_LUN_WILDCARD,
5942 channel, ROLE_UNKNOWN);
5943 ahc_pause(ahc);
5945 /* Make sure the sequencer is in a safe location. */
5946 ahc_clear_critical_section(ahc);
5949 * Run our command complete fifos to ensure that we perform
5950 * completion processing on any commands that 'completed'
5951 * before the reset occurred.
5953 ahc_run_qoutfifo(ahc);
5954 #ifdef AHC_TARGET_MODE
5956 * XXX - In Twin mode, the tqinfifo may have commands
5957 * for an unaffected channel in it. However, if
5958 * we have run out of ATIO resources to drain that
5959 * queue, we may not get them all out here. Further,
5960 * the blocked transactions for the reset channel
5961 * should just be killed off, irrespecitve of whether
5962 * we are blocked on ATIO resources. Write a routine
5963 * to compact the tqinfifo appropriately.
5965 if ((ahc->flags & AHC_TARGETROLE) != 0) {
5966 ahc_run_tqinfifo(ahc, /*paused*/TRUE);
5968 #endif
5971 * Reset the bus if we are initiating this reset
5973 sblkctl = ahc_inb(ahc, SBLKCTL);
5974 cur_channel = 'A';
/* On Twin chips SELBUSB says which bus the chip is pointed at. */
5975 if ((ahc->features & AHC_TWIN) != 0
5976 && ((sblkctl & SELBUSB) != 0))
5977 cur_channel = 'B';
5978 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
5979 if (cur_channel != channel) {
5980 /* Case 1: Command for another bus is active
5981 * Stealthily reset the other bus without
5982 * upsetting the current bus.
5984 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
5985 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
5986 #ifdef AHC_TARGET_MODE
5988 * Bus resets clear ENSELI, so we cannot
5989 * defer re-enabling bus reset interrupts
5990 * if we are in target mode.
5992 if ((ahc->flags & AHC_TARGETROLE) != 0)
5993 simode1 |= ENSCSIRST;
5994 #endif
5995 ahc_outb(ahc, SIMODE1, simode1);
5996 if (initiate_reset)
5997 ahc_reset_current_bus(ahc);
5998 ahc_clear_intstat(ahc);
5999 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
/* Switch back to the bus that was active when we entered. */
6000 ahc_outb(ahc, SBLKCTL, sblkctl);
6001 restart_needed = FALSE;
6002 } else {
6003 /* Case 2: A command from this bus is active or we're idle */
6004 simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
6005 #ifdef AHC_TARGET_MODE
6007 * Bus resets clear ENSELI, so we cannot
6008 * defer re-enabling bus reset interrupts
6009 * if we are in target mode.
6011 if ((ahc->flags & AHC_TARGETROLE) != 0)
6012 simode1 |= ENSCSIRST;
6013 #endif
6014 ahc_outb(ahc, SIMODE1, simode1);
6015 if (initiate_reset)
6016 ahc_reset_current_bus(ahc);
6017 ahc_clear_intstat(ahc);
6018 ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
6019 restart_needed = TRUE;
6023 * Clean up all the state information for the
6024 * pending transactions on this bus.
6026 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
6027 CAM_LUN_WILDCARD, SCB_LIST_NULL,
6028 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
6030 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
6032 #ifdef AHC_TARGET_MODE
6034 * Send an immediate notify ccb to all target more peripheral
6035 * drivers affected by this action.
6037 for (target = 0; target <= max_scsiid; target++) {
6038 struct ahc_tmode_tstate* tstate;
6039 u_int lun;
6041 tstate = ahc->enabled_targets[target];
6042 if (tstate == NULL)
6043 continue;
6044 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
6045 struct ahc_tmode_lstate* lstate;
6047 lstate = tstate->enabled_luns[lun];
6048 if (lstate == NULL)
6049 continue;
6051 ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
6052 EVENT_TYPE_BUS_RESET, /*arg*/0);
6053 ahc_send_lstate_events(ahc, lstate);
6056 #endif
6057 /* Notify the XPT that a bus reset occurred */
6058 ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
6059 CAM_LUN_WILDCARD, AC_BUS_RESET);
6062 * Revert to async/narrow transfers until we renegotiate.
6064 for (target = 0; target <= max_scsiid; target++) {
6066 if (ahc->enabled_targets[target] == NULL)
6067 continue;
6068 for (initiator = 0; initiator <= max_scsiid; initiator++) {
6069 struct ahc_devinfo devinfo;
6071 ahc_compile_devinfo(&devinfo, target, initiator,
6072 CAM_LUN_WILDCARD,
6073 channel, ROLE_UNKNOWN);
6074 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
6075 AHC_TRANS_CUR, /*paused*/TRUE);
6076 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
6077 /*period*/0, /*offset*/0,
6078 /*ppr_options*/0, AHC_TRANS_CUR,
6079 /*paused*/TRUE);
6083 if (restart_needed)
6084 ahc_restart(ahc);
6085 else
6086 ahc_unpause(ahc);
6087 return found;
6091 /***************************** Residual Processing ****************************/
6093 * Calculate the residual for a just completed SCB.
/*
 * Compute the data (or sense) residual for a just-completed SCB from
 * the hardware SCB's sgptr and the status packet's residual fields,
 * and record it via ahc_set_residual()/ahc_set_sense_residual().
 * The five cases handled are enumerated in the comment below.
 */
6095 void
6096 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
6098 struct hardware_scb *hscb;
6099 struct status_pkt *spkt;
6100 uint32_t sgptr;
6101 uint32_t resid_sgptr;
6102 uint32_t resid;
6105 * 5 cases.
6106 * 1) No residual.
6107 * SG_RESID_VALID clear in sgptr.
6108 * 2) Transferless command
6109 * 3) Never performed any transfers.
6110 * sgptr has SG_FULL_RESID set.
6111 * 4) No residual but target did not
6112 * save data pointers after the
6113 * last transfer, so sgptr was
6114 * never updated.
6115 * 5) We have a partial residual.
6116 * Use residual_sgptr to determine
6117 * where we are.
6120 hscb = scb->hscb;
/* Hardware SCB fields are little endian; convert before testing. */
6121 sgptr = ahc_le32toh(hscb->sgptr);
6122 if ((sgptr & SG_RESID_VALID) == 0)
6123 /* Case 1 */
6124 return;
6125 sgptr &= ~SG_RESID_VALID;
6127 if ((sgptr & SG_LIST_NULL) != 0)
6128 /* Case 2 */
6129 return;
6131 spkt = &hscb->shared_data.status;
6132 resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
6133 if ((sgptr & SG_FULL_RESID) != 0) {
6134 /* Case 3 */
6135 resid = ahc_get_transfer_length(scb);
6136 } else if ((resid_sgptr & SG_LIST_NULL) != 0) {
6137 /* Case 4 */
6138 return;
6139 } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
6140 panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
6141 } else {
6142 struct ahc_dma_seg *sg;
6145 * Remainder of the SG where the transfer
6146 * stopped.
6148 resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
6149 sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
6151 /* The residual sg_ptr always points to the next sg */
6152 sg--;
6155 * Add up the contents of all residual
6156 * SG segments that are after the SG where
6157 * the transfer stopped.
6159 while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
6160 sg++;
6161 resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
/* Sense transfers record their residual separately. */
6164 if ((scb->flags & SCB_SENSE) == 0)
6165 ahc_set_residual(scb, resid);
6166 else
6167 ahc_set_sense_residual(scb, resid);
6169 #ifdef AHC_DEBUG
6170 if ((ahc_debug & AHC_SHOW_MISC) != 0) {
6171 ahc_print_path(ahc, scb);
6172 printf("Handled %sResidual of %d bytes\n",
6173 (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
6175 #endif
6178 /******************************* Target Mode **********************************/
6179 #ifdef AHC_TARGET_MODE
6181 * Add a target mode event to this lun's queue
/*
 * Queue a target-mode event on the lun's circular event buffer and
 * freeze its devq until the event can be delivered.  Bus-reset and
 * device-reset events flush any earlier queued events first; if the
 * buffer is full, the oldest event is dropped (with a console note)
 * to make room.  Delivery happens later in ahc_send_lstate_events().
 */
6183 static void
6184 ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
6185 u_int initiator_id, u_int event_type, u_int event_arg)
6187 struct ahc_tmode_event *event;
6188 int pending;
6190 xpt_freeze_devq(lstate->path, /*count*/1);
/* Count entries currently in the circular buffer. */
6191 if (lstate->event_w_idx >= lstate->event_r_idx)
6192 pending = lstate->event_w_idx - lstate->event_r_idx;
6193 else
6194 pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
6195 - (lstate->event_r_idx - lstate->event_w_idx);
6197 if (event_type == EVENT_TYPE_BUS_RESET
6198 || event_type == MSG_BUS_DEV_RESET) {
6200 * Any earlier events are irrelevant, so reset our buffer.
6201 * This has the effect of allowing us to deal with reset
6202 * floods (an external device holding down the reset line)
6203 * without losing the event that is really interesting.
6205 lstate->event_r_idx = 0;
6206 lstate->event_w_idx = 0;
6207 xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
/* Buffer full: drop the oldest event to make room for this one. */
6210 if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
6211 xpt_print_path(lstate->path);
6212 printf("immediate event %x:%x lost\n",
6213 lstate->event_buffer[lstate->event_r_idx].event_type,
6214 lstate->event_buffer[lstate->event_r_idx].event_arg);
6215 lstate->event_r_idx++;
6216 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6217 lstate->event_r_idx = 0;
6218 xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
/* Store the new event at the write index and advance (with wrap). */
6221 event = &lstate->event_buffer[lstate->event_w_idx];
6222 event->initiator_id = initiator_id;
6223 event->event_type = event_type;
6224 event->event_arg = event_arg;
6225 lstate->event_w_idx++;
6226 if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6227 lstate->event_w_idx = 0;
6231 * Send any target mode events queued up waiting
6232 * for immediate notify resources.
/*
 * Drain queued target-mode events for this lun into any waiting
 * immediate-notify CCBs, completing one CCB per event.  Stops when
 * either the event buffer is empty or no notify CCBs remain.
 */
6234 void
6235 ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
6237 struct ccb_hdr *ccbh;
6238 struct ccb_immed_notify *inot;
6240 while (lstate->event_r_idx != lstate->event_w_idx
6241 && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
6242 struct ahc_tmode_event *event;
6244 event = &lstate->event_buffer[lstate->event_r_idx];
6245 SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
6246 inot = (struct ccb_immed_notify *)ccbh;
/* Translate the event into the CCB's status / message fields. */
6247 switch (event->event_type) {
6248 case EVENT_TYPE_BUS_RESET:
6249 ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
6250 break;
6251 default:
6252 ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
6253 inot->message_args[0] = event->event_type;
6254 inot->message_args[1] = event->event_arg;
6255 break;
6257 inot->initiator_id = event->initiator_id;
6258 inot->sense_len = 0;
6259 xpt_done((union ccb *)inot);
/* Consume the event, wrapping the circular read index. */
6260 lstate->event_r_idx++;
6261 if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
6262 lstate->event_r_idx = 0;
6267 /******************** Sequencer Program Patching/Download *********************/
6269 #ifdef AHC_DUMP_SEQ
/*
 * Debug helper: read back the entire sequencer instruction RAM and
 * print each 32-bit instruction word, one per line.  Leaves the
 * sequencer in LOADRAM mode; only compiled with AHC_DUMP_SEQ.
 */
6270 void
6271 ahc_dumpseq(struct ahc_softc* ahc)
6273 int i;
/* Put the sequencer into RAM-access mode, starting at address 0. */
6275 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6276 ahc_outb(ahc, SEQADDR0, 0);
6277 ahc_outb(ahc, SEQADDR1, 0);
6278 for (i = 0; i < ahc->instruction_ram_size; i++) {
6279 uint8_t ins_bytes[4];
6281 ahc_insb(ahc, SEQRAM, ins_bytes, 4);
6282 printf("0x%08x\n", ins_bytes[0] << 24
6283 | ins_bytes[1] << 16
6284 | ins_bytes[2] << 8
6285 | ins_bytes[3]);
6288 #endif
/*
 * Download the sequencer firmware (seqprog) into instruction RAM,
 * applying conditional patches and relocating critical-section
 * boundaries to the post-patch addresses.  Also builds the table of
 * downloadable constants (FIFO offsets, cache-line masks, SG prefetch
 * sizes) consumed by patched instructions.  Returns 0 on success or
 * ENOMEM if the patched program exceeds instruction_ram_size.
 */
6290 static int
6291 ahc_loadseq(struct ahc_softc *ahc)
6293 struct cs cs_table[num_critical_sections];
6294 u_int begin_set[num_critical_sections];
6295 u_int end_set[num_critical_sections];
6296 struct patch *cur_patch;
6297 u_int cs_count;
6298 u_int cur_cs;
6299 u_int i;
6300 u_int skip_addr;
6301 u_int sg_prefetch_cnt;
6302 int downloaded;
6303 uint8_t download_consts[7];
6306 * Start out with 0 critical sections
6307 * that apply to this firmware load.
6309 cs_count = 0;
6310 cur_cs = 0;
6311 memset(begin_set, 0, sizeof(begin_set));
6312 memset(end_set, 0, sizeof(end_set));
6314 /* Setup downloadable constant table */
/* Target-mode command area (when present) sits ahead of the qoutfifo. */
6315 download_consts[QOUTFIFO_OFFSET] = 0;
6316 if (ahc->targetcmds != NULL)
6317 download_consts[QOUTFIFO_OFFSET] += 32;
6318 download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
6319 download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
6320 download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
/* Prefetch at least two SG elements per burst. */
6321 sg_prefetch_cnt = ahc->pci_cachesize;
6322 if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
6323 sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
6324 download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
6325 download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
6326 download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
6328 cur_patch = patches;
6329 downloaded = 0;
6330 skip_addr = 0;
/* Enter RAM-load mode at sequencer address 0. */
6331 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
6332 ahc_outb(ahc, SEQADDR0, 0);
6333 ahc_outb(ahc, SEQADDR1, 0);
6335 for (i = 0; i < sizeof(seqprog)/4; i++) {
6336 if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
6338 * Don't download this instruction as it
6339 * is in a patch that was removed.
6341 continue;
6344 if (downloaded == ahc->instruction_ram_size) {
6346 * We're about to exceed the instruction
6347 * storage capacity for this chip. Fail
6348 * the load.
6350 printf("\n%s: Program too large for instruction memory "
6351 "size of %d!\n", ahc_name(ahc),
6352 ahc->instruction_ram_size);
6353 return (ENOMEM);
6357 * Move through the CS table until we find a CS
6358 * that might apply to this instruction.
6360 for (; cur_cs < num_critical_sections; cur_cs++) {
6361 if (critical_sections[cur_cs].end <= i) {
6362 if (begin_set[cs_count] == TRUE
6363 && end_set[cs_count] == FALSE) {
/* Record the end in downloaded (post-patch) address space. */
6364 cs_table[cs_count].end = downloaded;
6365 end_set[cs_count] = TRUE;
6366 cs_count++;
6368 continue;
6370 if (critical_sections[cur_cs].begin <= i
6371 && begin_set[cs_count] == FALSE) {
6372 cs_table[cs_count].begin = downloaded;
6373 begin_set[cs_count] = TRUE;
6375 break;
6377 ahc_download_instr(ahc, i, download_consts);
6378 downloaded++;
6381 ahc->num_critical_sections = cs_count;
6382 if (cs_count != 0) {
6384 cs_count *= sizeof(struct cs);
6385 ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
6386 if (ahc->critical_sections == NULL)
6387 panic("ahc_loadseq: Could not malloc");
6388 memcpy(ahc->critical_sections, cs_table, cs_count);
/* Leave RAM-load mode and let the sequencer run the new program. */
6390 ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
6392 if (bootverbose) {
6393 printf(" %d instructions downloaded\n", downloaded);
6394 printf("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
6395 ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
6397 return (0);
/*
 * Decide whether firmware instruction 'start_instr' should be
 * downloaded.  Evaluates any patches beginning at this instruction:
 * a rejected patch sets *skip_addr past its instructions; an accepted
 * patch simply advances to the next patch entry.  *start_patch is
 * updated for the caller's next iteration.  Returns 1 to download the
 * instruction, 0 to skip it.
 */
6400 static int
6401 ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
6402 u_int start_instr, u_int *skip_addr)
6404 struct patch *cur_patch;
6405 struct patch *last_patch;
6406 u_int num_patches;
6408 num_patches = ARRAY_SIZE(patches);
6409 last_patch = &patches[num_patches];
6410 cur_patch = *start_patch;
6412 while (cur_patch < last_patch && start_instr == cur_patch->begin) {
/* patch_func decides at runtime whether this patch applies. */
6414 if (cur_patch->patch_func(ahc) == 0) {
6416 /* Start rejecting code */
6417 *skip_addr = start_instr + cur_patch->skip_instr;
6418 cur_patch += cur_patch->skip_patch;
6419 } else {
6420 /* Accepted this patch. Advance to the next
6421 * one and wait for our intruction pointer to
6422 * hit this point.
6424 cur_patch++;
6428 *start_patch = cur_patch;
6429 if (start_instr < *skip_addr)
6430 /* Still skipping */
6431 return (0);
6433 return (1);
/*
 * Translate one firmware instruction at index 'instrptr' and write it
 * to sequencer RAM.  Jump-class instructions have their target address
 * relocated to account for instructions removed by rejected patches;
 * instructions flagged with the parity bit pull an immediate from the
 * 'dconsts' downloadable-constant table.  Ultra2 parts get odd parity
 * computed over the instruction word; older parts get the instruction
 * compressed into their narrower encoding.  Panics on an opcode it
 * does not recognize or on an unsupported multi-byte BMOV.
 */
6436 static void
6437 ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
6439 union ins_formats instr;
6440 struct ins_format1 *fmt1_ins;
6441 struct ins_format3 *fmt3_ins;
6442 u_int opcode;
6445 * The firmware is always compiled into a little endian format.
6447 instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
6449 fmt1_ins = &instr.format1;
6450 fmt3_ins = NULL;
6452 /* Pull the opcode */
6453 opcode = instr.format1.opcode;
6454 switch (opcode) {
6455 case AIC_OP_JMP:
6456 case AIC_OP_JC:
6457 case AIC_OP_JNC:
6458 case AIC_OP_CALL:
6459 case AIC_OP_JNE:
6460 case AIC_OP_JNZ:
6461 case AIC_OP_JE:
6462 case AIC_OP_JZ:
6464 struct patch *cur_patch;
6465 int address_offset;
6466 u_int address;
6467 u_int skip_addr;
6468 u_int i;
6470 fmt3_ins = &instr.format3;
6471 address_offset = 0;
6472 address = fmt3_ins->address;
6473 cur_patch = patches;
6474 skip_addr = 0;
/* Count how many instructions before 'address' were patched out. */
6476 for (i = 0; i < address;) {
6478 ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
6480 if (skip_addr > i) {
6481 int end_addr;
6483 end_addr = min(address, skip_addr);
6484 address_offset += end_addr - i;
6485 i = skip_addr;
6486 } else {
6487 i++;
/* Rebase the jump target into the downloaded address space. */
6490 address -= address_offset;
6491 fmt3_ins->address = address;
6492 /* FALLTHROUGH */
6494 case AIC_OP_OR:
6495 case AIC_OP_AND:
6496 case AIC_OP_XOR:
6497 case AIC_OP_ADD:
6498 case AIC_OP_ADC:
6499 case AIC_OP_BMOV:
/* The parity bit marks an immediate that indexes dconsts. */
6500 if (fmt1_ins->parity != 0) {
6501 fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
6503 fmt1_ins->parity = 0;
6504 if ((ahc->features & AHC_CMD_CHAN) == 0
6505 && opcode == AIC_OP_BMOV) {
6507 * Block move was added at the same time
6508 * as the command channel. Verify that
6509 * this is only a move of a single element
6510 * and convert the BMOV to a MOV
6511 * (AND with an immediate of FF).
6513 if (fmt1_ins->immediate != 1)
6514 panic("%s: BMOV not supported\n",
6515 ahc_name(ahc));
6516 fmt1_ins->opcode = AIC_OP_AND;
6517 fmt1_ins->immediate = 0xff;
6519 /* FALLTHROUGH */
6520 case AIC_OP_ROL:
6521 if ((ahc->features & AHC_ULTRA2) != 0) {
6522 int i, count;
6524 /* Calculate odd parity for the instruction */
6525 for (i = 0, count = 0; i < 31; i++) {
6526 uint32_t mask;
6528 mask = 0x01 << i;
6529 if ((instr.integer & mask) != 0)
6530 count++;
6532 if ((count & 0x01) == 0)
6533 instr.format1.parity = 1;
6534 } else {
6535 /* Compress the instruction for older sequencers */
6536 if (fmt3_ins != NULL) {
6537 instr.integer =
6538 fmt3_ins->immediate
6539 | (fmt3_ins->source << 8)
6540 | (fmt3_ins->address << 16)
6541 | (fmt3_ins->opcode << 25);
6542 } else {
6543 instr.integer =
6544 fmt1_ins->immediate
6545 | (fmt1_ins->source << 8)
6546 | (fmt1_ins->destination << 16)
6547 | (fmt1_ins->ret << 24)
6548 | (fmt1_ins->opcode << 25);
6551 /* The sequencer is a little endian cpu */
6552 instr.integer = ahc_htole32(instr.integer);
6553 ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
6554 break;
6555 default:
6556 panic("Unknown opcode encountered in seq program");
6557 break;
/*
 * Pretty-print a register value as "NAME[0xVV]:(BIT|BIT|...)", using
 * 'table' to decode bit fields by name.  Tracks output width through
 * *cur_column, emitting a newline once it passes 'wrap_point'.  With
 * a NULL table only the raw value is printed.  Returns the number of
 * characters printed for this register.
 */
6562 ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
6563 const char *name, u_int address, u_int value,
6564 u_int *cur_column, u_int wrap_point)
6566 int printed;
6567 u_int printed_mask;
6569 if (cur_column != NULL && *cur_column >= wrap_point) {
6570 printf("\n");
6571 *cur_column = 0;
6573 printed = printf("%s[0x%x]", name, value);
6574 if (table == NULL) {
6575 printed += printf(" ");
6576 *cur_column += printed;
6577 return (printed);
/* Decode fields until all 8 bits are accounted for or no entry fits. */
6579 printed_mask = 0;
6580 while (printed_mask != 0xFF) {
6581 int entry;
6583 for (entry = 0; entry < num_entries; entry++) {
/* Skip entries that don't match or whose bits were already shown. */
6584 if (((value & table[entry].mask)
6585 != table[entry].value)
6586 || ((printed_mask & table[entry].mask)
6587 == table[entry].mask))
6588 continue;
6590 printed += printf("%s%s",
6591 printed_mask == 0 ? ":(" : "|",
6592 table[entry].name);
6593 printed_mask |= table[entry].mask;
6595 break;
6597 if (entry >= num_entries)
6598 break;
6600 if (printed_mask != 0)
6601 printed += printf(") ");
6602 else
6603 printed += printf(" ");
6604 if (cur_column != NULL)
6605 *cur_column += printed;
6606 return (printed);
/*
 * Diagnostic dump of controller state: core registers, sequencer
 * stack, the QIN/QOUT FIFOs, the on-chip waiting / disconnected /
 * free SCB lists, raw hardware SCB contents, the kernel pending and
 * free SCB lists, and per-target untagged queues.  Pauses the chip if
 * it was running and restores both the pause state and SCBPTR before
 * returning.  List walks are capped at 256 entries to survive a
 * corrupted (looping) list.
 */
6609 void
6610 ahc_dump_card_state(struct ahc_softc *ahc)
6612 struct scb *scb;
6613 struct scb_tailq *untagged_q;
6614 u_int cur_col;
6615 int paused;
6616 int target;
6617 int maxtarget;
6618 int i;
6619 uint8_t last_phase;
6620 uint8_t qinpos;
6621 uint8_t qintail;
6622 uint8_t qoutpos;
6623 uint8_t scb_index;
6624 uint8_t saved_scbptr;
/* Pause the chip for a consistent snapshot; remember if we did. */
6626 if (ahc_is_paused(ahc)) {
6627 paused = 1;
6628 } else {
6629 paused = 0;
6630 ahc_pause(ahc);
6633 saved_scbptr = ahc_inb(ahc, SCBPTR);
6634 last_phase = ahc_inb(ahc, LASTPHASE);
6635 printf(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
6636 "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
6637 ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
6638 ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
6639 if (paused)
6640 printf("Card was paused\n");
6641 printf("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
6642 ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
6643 ahc_inb(ahc, ARG_2));
6644 printf("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
6645 ahc_inb(ahc, SCBPTR));
/* Decode the interesting status/control registers by bit name. */
6646 cur_col = 0;
6647 if ((ahc->features & AHC_DT) != 0)
6648 ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
6649 ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
6650 ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
6651 ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
6652 ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
6653 ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
6654 ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
6655 ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
6656 ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
6657 ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
6658 ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
6659 ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
6660 ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
6661 ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
6662 ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
6663 ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
6664 ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
6665 ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
6666 ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
6667 if (cur_col != 0)
6668 printf("\n");
6669 printf("STACK:");
6670 for (i = 0; i < STACK_SIZE; i++)
6671 printf(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
6672 printf("\nSCB count = %d\n", ahc->scb_data->numscbs);
6673 printf("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
6674 printf("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
6675 /* QINFIFO */
6676 printf("QINFIFO entries: ");
6677 if ((ahc->features & AHC_QUEUE_REGS) != 0) {
6678 qinpos = ahc_inb(ahc, SNSCB_QOFF);
6679 ahc_outb(ahc, SNSCB_QOFF, qinpos);
6680 } else
6681 qinpos = ahc_inb(ahc, QINPOS);
6682 qintail = ahc->qinfifonext;
6683 while (qinpos != qintail) {
6684 printf("%d ", ahc->qinfifo[qinpos]);
6685 qinpos++;
6687 printf("\n");
6689 printf("Waiting Queue entries: ");
6690 scb_index = ahc_inb(ahc, WAITING_SCBH);
6691 i = 0;
6692 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6693 ahc_outb(ahc, SCBPTR, scb_index);
6694 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6695 scb_index = ahc_inb(ahc, SCB_NEXT);
6697 printf("\n");
6699 printf("Disconnected Queue entries: ");
6700 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
6701 i = 0;
6702 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6703 ahc_outb(ahc, SCBPTR, scb_index);
6704 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
6705 scb_index = ahc_inb(ahc, SCB_NEXT);
6707 printf("\n");
/* Sync DMA'd completion queue before reading it from the CPU side. */
6709 ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
6710 printf("QOUTFIFO entries: ");
6711 qoutpos = ahc->qoutfifonext;
6712 i = 0;
6713 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
6714 printf("%d ", ahc->qoutfifo[qoutpos]);
6715 qoutpos++;
6717 printf("\n");
6719 printf("Sequencer Free SCB List: ");
6720 scb_index = ahc_inb(ahc, FREE_SCBH);
6721 i = 0;
6722 while (scb_index != SCB_LIST_NULL && i++ < 256) {
6723 ahc_outb(ahc, SCBPTR, scb_index);
6724 printf("%d ", scb_index);
6725 scb_index = ahc_inb(ahc, SCB_NEXT);
6727 printf("\n");
6729 printf("Sequencer SCB Info: ");
6730 for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
6731 ahc_outb(ahc, SCBPTR, i);
6732 cur_col = printf("\n%3d ", i);
6734 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
6735 ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
6736 ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
6737 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
6739 printf("\n");
6741 printf("Pending list: ");
6742 i = 0;
6743 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
6744 if (i++ > 256)
6745 break;
6746 cur_col = printf("\n%3d ", scb->hscb->tag);
6747 ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
6748 ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
6749 ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
/* Without SCB paging the chip copy can be compared directly. */
6750 if ((ahc->flags & AHC_PAGESCBS) == 0) {
6751 ahc_outb(ahc, SCBPTR, scb->hscb->tag);
6752 printf("(");
6753 ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
6754 &cur_col, 60);
6755 ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
6756 printf(")");
6759 printf("\n");
6761 printf("Kernel Free SCB list: ");
6762 i = 0;
6763 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
6764 if (i++ > 256)
6765 break;
6766 printf("%d ", scb->hscb->tag);
6768 printf("\n");
6770 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
6771 for (target = 0; target <= maxtarget; target++) {
6772 untagged_q = &ahc->untagged_queues[target];
6773 if (TAILQ_FIRST(untagged_q) == NULL)
6774 continue;
6775 printf("Untagged Q(%d): ", target);
6776 i = 0;
6777 TAILQ_FOREACH(scb, untagged_q, links.tqe) {
6778 if (i++ > 256)
6779 break;
6780 printf("%d ", scb->hscb->tag);
6782 printf("\n");
6785 ahc_platform_dump_card_state(ahc);
6786 printf("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
/* Restore chip selection and pause state exactly as we found them. */
6787 ahc_outb(ahc, SCBPTR, saved_scbptr);
6788 if (paused == 0)
6789 ahc_unpause(ahc);
6792 /************************* Target Mode ****************************************/
6793 #ifdef AHC_TARGET_MODE
/*
 * Resolve the target-mode tstate/lstate pair addressed by a CCB.
 * The wildcard target/lun pair maps to the "black hole" lstate that
 * absorbs requests for unattached luns.  Returns CAM_REQ_CMP on
 * success, or CAM_REQ_INVALID / CAM_TID_INVALID / CAM_LUN_INVALID /
 * CAM_PATH_INVALID (the last only when notfound_failure is set and
 * no lstate exists).  *lstate may be NULL on a CAM_REQ_CMP return
 * when notfound_failure is clear.
 */
6794 cam_status
6795 ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
6796 struct ahc_tmode_tstate **tstate,
6797 struct ahc_tmode_lstate **lstate,
6798 int notfound_failure)
6801 if ((ahc->features & AHC_TARGETMODE) == 0)
6802 return (CAM_REQ_INVALID);
6805 * Handle the 'black hole' device that sucks up
6806 * requests to unattached luns on enabled targets.
6808 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
6809 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
6810 *tstate = NULL;
6811 *lstate = ahc->black_hole;
6812 } else {
6813 u_int max_id;
/* Range-check the id/lun before indexing the enabled tables. */
6815 max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
6816 if (ccb->ccb_h.target_id >= max_id)
6817 return (CAM_TID_INVALID);
6819 if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
6820 return (CAM_LUN_INVALID);
6822 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
6823 *lstate = NULL;
6824 if (*tstate != NULL)
6825 *lstate =
6826 (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
6829 if (notfound_failure != 0 && *lstate == NULL)
6830 return (CAM_PATH_INVALID);
6832 return (CAM_REQ_CMP);
/*
 * ahc_handle_en_lun() - service an "enable lun" CCB: turn target-mode
 * operation on or off for a particular target id/lun (or the wildcard
 * "black hole" lun).  May reload the sequencer firmware to switch the
 * controller between initiator and target roles.  The outcome is reported
 * through ccb->ccb_h.status; the CCB's cel.enable field selects enable
 * versus disable.
 */
6835 void
6836 ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
6838 struct ahc_tmode_tstate *tstate;
6839 struct ahc_tmode_lstate *lstate;
6840 struct ccb_en_lun *cel;
6841 cam_status status;
6842 u_long s;
6843 u_int target;
6844 u_int lun;
6845 u_int target_mask;
6846 u_int our_id;
6847 int error;
6848 char channel;
/* Look up any existing state; absence is not an error here. */
6850 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
6851 /*notfound_failure*/FALSE);
6853 if (status != CAM_REQ_CMP) {
6854 ccb->ccb_h.status = status;
6855 return;
/* Each bus (twin-channel) carries its own initiator/target id. */
6858 if (cam_sim_bus(sim) == 0)
6859 our_id = ahc->our_id;
6860 else
6861 our_id = ahc->our_id_b;
6863 if (ccb->ccb_h.target_id != our_id) {
6865 * our_id represents our initiator ID, or
6866 * the ID of the first target to have an
6867 * enabled lun in target mode. There are
6868 * two cases that may preclude enabling a
6869 * target id other than our_id.
6871 * o our_id is for an active initiator role.
6872 * Since the hardware does not support
6873 * reselections to the initiator role at
6874 * anything other than our_id, and our_id
6875 * is used by the hardware to indicate the
6876 * ID to use for both select-out and
6877 * reselect-out operations, the only target
6878 * ID we can support in this mode is our_id.
6880 * o The MULTARGID feature is not available and
6881 * a previous target mode ID has been enabled.
6883 if ((ahc->features & AHC_MULTIROLE) != 0) {
6885 if ((ahc->features & AHC_MULTI_TID) != 0
6886 && (ahc->flags & AHC_INITIATORROLE) != 0) {
6888 * Only allow additional targets if
6889 * the initiator role is disabled.
6890 * The hardware cannot handle a re-select-in
6891 * on the initiator id during a re-select-out
6892 * on a different target id.
6894 status = CAM_TID_INVALID;
6895 } else if ((ahc->flags & AHC_INITIATORROLE) != 0
6896 || ahc->enabled_luns > 0) {
6898 * Only allow our target id to change
6899 * if the initiator role is not configured
6900 * and there are no enabled luns which
6901 * are attached to the currently registered
6902 * scsi id.
6904 status = CAM_TID_INVALID;
6906 } else if ((ahc->features & AHC_MULTI_TID) == 0
6907 && ahc->enabled_luns > 0) {
6909 status = CAM_TID_INVALID;
6913 if (status != CAM_REQ_CMP) {
6914 ccb->ccb_h.status = status;
6915 return;
6919 * We now have an id that is valid.
6920 * If we aren't in target mode, switch modes.
6922 if ((ahc->flags & AHC_TARGETROLE) == 0
6923 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
6924 u_long s;
6925 ahc_flag saved_flags;
6927 printf("Configuring Target Mode\n");
6928 ahc_lock(ahc, &s);
/* Refuse the role switch while commands are still in flight. */
6929 if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
6930 ccb->ccb_h.status = CAM_BUSY;
6931 ahc_unlock(ahc, &s);
6932 return;
6934 saved_flags = ahc->flags;
6935 ahc->flags |= AHC_TARGETROLE;
6936 if ((ahc->features & AHC_MULTIROLE) == 0)
6937 ahc->flags &= ~AHC_INITIATORROLE;
/* The sequencer must be paused while new firmware is loaded. */
6938 ahc_pause(ahc);
6939 error = ahc_loadseq(ahc);
6940 if (error != 0) {
6942 * Restore original configuration and notify
6943 * the caller that we cannot support target mode.
6944 * Since the adapter started out in this
6945 * configuration, the firmware load will succeed,
6946 * so there is no point in checking ahc_loadseq's
6947 * return value.
6949 ahc->flags = saved_flags;
6950 (void)ahc_loadseq(ahc);
6951 ahc_restart(ahc);
6952 ahc_unlock(ahc, &s);
6953 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
6954 return;
6956 ahc_restart(ahc);
6957 ahc_unlock(ahc, &s);
/* Decode the request; channel B ids occupy the upper byte of the mask. */
6959 cel = &ccb->cel;
6960 target = ccb->ccb_h.target_id;
6961 lun = ccb->ccb_h.target_lun;
6962 channel = SIM_CHANNEL(ahc, sim);
6963 target_mask = 0x01 << target;
6964 if (channel == 'B')
6965 target_mask <<= 8;
6967 if (cel->enable != 0) {
6968 u_int scsiseq;
6970 /* Are we already enabled?? */
6971 if (lstate != NULL) {
6972 xpt_print_path(ccb->ccb_h.path);
6973 printf("Lun already enabled\n");
6974 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
6975 return;
6978 if (cel->grp6_len != 0
6979 || cel->grp7_len != 0) {
6981 * Don't (yet?) support vendor
6982 * specific commands.
6984 ccb->ccb_h.status = CAM_REQ_INVALID;
6985 printf("Non-zero Group Codes\n");
6986 return;
6990 * Seems to be okay.
6991 * Setup our data structures.
6993 if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
6994 tstate = ahc_alloc_tstate(ahc, target, channel);
6995 if (tstate == NULL) {
6996 xpt_print_path(ccb->ccb_h.path);
6997 printf("Couldn't allocate tstate\n");
6998 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
6999 return;
7002 lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
7003 if (lstate == NULL) {
7004 xpt_print_path(ccb->ccb_h.path);
7005 printf("Couldn't allocate lstate\n");
7006 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7007 return;
7009 memset(lstate, 0, sizeof(*lstate));
7010 status = xpt_create_path(&lstate->path, /*periph*/NULL,
7011 xpt_path_path_id(ccb->ccb_h.path),
7012 xpt_path_target_id(ccb->ccb_h.path),
7013 xpt_path_lun_id(ccb->ccb_h.path));
7014 if (status != CAM_REQ_CMP) {
/* Path creation failed; release the lstate we just allocated. */
7015 free(lstate, M_DEVBUF);
7016 xpt_print_path(ccb->ccb_h.path);
7017 printf("Couldn't allocate path\n");
7018 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
7019 return;
7021 SLIST_INIT(&lstate->accept_tios);
7022 SLIST_INIT(&lstate->immed_notifies);
/* Hardware updates below require the sequencer paused. */
7023 ahc_lock(ahc, &s);
7024 ahc_pause(ahc);
7025 if (target != CAM_TARGET_WILDCARD) {
7026 tstate->enabled_luns[lun] = lstate;
7027 ahc->enabled_luns++;
7029 if ((ahc->features & AHC_MULTI_TID) != 0) {
7030 u_int targid_mask;
/* Add this id to the 16-bit TARGID selection-enable mask. */
7032 targid_mask = ahc_inb(ahc, TARGID)
7033 | (ahc_inb(ahc, TARGID + 1) << 8);
7035 targid_mask |= target_mask;
7036 ahc_outb(ahc, TARGID, targid_mask);
7037 ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
7039 ahc_update_scsiid(ahc, targid_mask);
7040 } else {
7041 u_int our_id;
7042 char channel;
7044 channel = SIM_CHANNEL(ahc, sim);
7045 our_id = SIM_SCSI_ID(ahc, sim);
7048 * This can only happen if selections
7049 * are not enabled
7051 if (target != our_id) {
7052 u_int sblkctl;
7053 char cur_channel;
7054 int swap;
/*
 * On twin-channel parts, temporarily bank-switch to the
 * channel being reconfigured before writing SCSIID, then
 * restore the original bank.
 */
7056 sblkctl = ahc_inb(ahc, SBLKCTL);
7057 cur_channel = (sblkctl & SELBUSB)
7058 ? 'B' : 'A';
7059 if ((ahc->features & AHC_TWIN) == 0)
7060 cur_channel = 'A';
7061 swap = cur_channel != channel;
7062 if (channel == 'A')
7063 ahc->our_id = target;
7064 else
7065 ahc->our_id_b = target;
7067 if (swap)
7068 ahc_outb(ahc, SBLKCTL,
7069 sblkctl ^ SELBUSB);
7071 ahc_outb(ahc, SCSIID, target);
7073 if (swap)
7074 ahc_outb(ahc, SBLKCTL, sblkctl);
7077 } else
7078 ahc->black_hole = lstate;
7079 /* Allow select-in operations */
7080 if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
7081 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7082 scsiseq |= ENSELI;
7083 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7084 scsiseq = ahc_inb(ahc, SCSISEQ);
7085 scsiseq |= ENSELI;
7086 ahc_outb(ahc, SCSISEQ, scsiseq);
7088 ahc_unpause(ahc);
7089 ahc_unlock(ahc, &s);
7090 ccb->ccb_h.status = CAM_REQ_CMP;
7091 xpt_print_path(ccb->ccb_h.path);
7092 printf("Lun now enabled for target mode\n");
7093 } else {
/* Disable path: tear down the lun, and possibly the whole role. */
7094 struct scb *scb;
7095 int i, empty;
7097 if (lstate == NULL) {
7098 ccb->ccb_h.status = CAM_LUN_INVALID;
7099 return;
7102 ahc_lock(ahc, &s);
7104 ccb->ccb_h.status = CAM_REQ_CMP;
/* Refuse to disable while target-mode I/O for this path is pending. */
7105 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
7106 struct ccb_hdr *ccbh;
7108 ccbh = &scb->io_ctx->ccb_h;
7109 if (ccbh->func_code == XPT_CONT_TARGET_IO
7110 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
7111 printf("CTIO pending\n");
7112 ccb->ccb_h.status = CAM_REQ_INVALID;
7113 ahc_unlock(ahc, &s);
7114 return;
7118 if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
7119 printf("ATIOs pending\n");
7120 ccb->ccb_h.status = CAM_REQ_INVALID;
7123 if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
7124 printf("INOTs pending\n");
7125 ccb->ccb_h.status = CAM_REQ_INVALID;
7128 if (ccb->ccb_h.status != CAM_REQ_CMP) {
7129 ahc_unlock(ahc, &s);
7130 return;
7133 xpt_print_path(ccb->ccb_h.path);
7134 printf("Target mode disabled\n");
7135 xpt_free_path(lstate->path);
7136 free(lstate, M_DEVBUF);
7138 ahc_pause(ahc);
7139 /* Can we clean up the target too? */
7140 if (target != CAM_TARGET_WILDCARD) {
7141 tstate->enabled_luns[lun] = NULL;
7142 ahc->enabled_luns--;
/* The tstate can go once no lun on this target remains enabled. */
7143 for (empty = 1, i = 0; i < 8; i++)
7144 if (tstate->enabled_luns[i] != NULL) {
7145 empty = 0;
7146 break;
7149 if (empty) {
7150 ahc_free_tstate(ahc, target, channel,
7151 /*force*/FALSE);
7152 if (ahc->features & AHC_MULTI_TID) {
7153 u_int targid_mask;
/* Remove this id from the TARGID selection-enable mask. */
7155 targid_mask = ahc_inb(ahc, TARGID)
7156 | (ahc_inb(ahc, TARGID + 1)
7157 << 8);
7159 targid_mask &= ~target_mask;
7160 ahc_outb(ahc, TARGID, targid_mask);
7161 ahc_outb(ahc, TARGID+1,
7162 (targid_mask >> 8));
7163 ahc_update_scsiid(ahc, targid_mask);
7166 } else {
7168 ahc->black_hole = NULL;
7171 * We can't allow selections without
7172 * our black hole device.
7174 empty = TRUE;
7176 if (ahc->enabled_luns == 0) {
7177 /* Disallow select-in */
7178 u_int scsiseq;
7180 scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
7181 scsiseq &= ~ENSELI;
7182 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
7183 scsiseq = ahc_inb(ahc, SCSISEQ);
7184 scsiseq &= ~ENSELI;
7185 ahc_outb(ahc, SCSISEQ, scsiseq);
/* Single-role controllers fall back to initiator-only firmware. */
7187 if ((ahc->features & AHC_MULTIROLE) == 0) {
7188 printf("Configuring Initiator Mode\n");
7189 ahc->flags &= ~AHC_TARGETROLE;
7190 ahc->flags |= AHC_INITIATORROLE;
7192 * Returning to a configuration that
7193 * fit previously will always succeed.
7195 (void)ahc_loadseq(ahc);
7196 ahc_restart(ahc);
7198 * Unpaused. The extra unpause
7199 * that follows is harmless.
7203 ahc_unpause(ahc);
7204 ahc_unlock(ahc, &s);
/*
 * ahc_update_scsiid() - keep the OID (our-id) field of the SCSIID register
 * consistent with the TARGID selection-enable mask on MULTI_TID parts.
 *
 * If the id currently programmed into OID is no longer present in
 * @targid_mask, re-point OID at the lowest enabled id (or at the
 * initiator id when the mask is empty).  Panics if called on hardware
 * without the MULTI_TID feature.
 */
7208 static void
7209 ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
7211 u_int scsiid_mask;
7212 u_int scsiid;
7214 if ((ahc->features & AHC_MULTI_TID) == 0)
7215 panic("ahc_update_scsiid called on non-multitid unit\n");
7218 * Since we will rely on the TARGID mask
7219 * for selection enables, ensure that OID
7220 * in SCSIID is not set to some other ID
7221 * that we don't want to allow selections on.
7223 if ((ahc->features & AHC_ULTRA2) != 0)
7224 scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
7225 else
7226 scsiid = ahc_inb(ahc, SCSIID);
7227 scsiid_mask = 0x1 << (scsiid & OID);
/* Current OID not in the enable mask: pick a replacement id. */
7228 if ((targid_mask & scsiid_mask) == 0) {
7229 u_int our_id;
7231 /* ffs counts from 1 */
7232 our_id = ffs(targid_mask);
7233 if (our_id == 0)
7234 our_id = ahc->our_id;
7235 else
7236 our_id--;
/* Preserve the TID bits; replace only the OID field. */
7237 scsiid &= TID;
7238 scsiid |= our_id;
7240 if ((ahc->features & AHC_ULTRA2) != 0)
7241 ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
7242 else
7243 ahc_outb(ahc, SCSIID, scsiid);
/*
 * ahc_run_tqinfifo() - drain the target-mode incoming-command queue.
 *
 * Consumes valid entries from ahc->targetcmds[], handing each to
 * ahc_handle_target_cmd() and stopping early if that fails (e.g. no
 * ATIOs available).  Periodically publishes the consumer index back to
 * the sequencer.  @paused indicates whether the caller already holds the
 * sequencer paused; it is forced TRUE on AUTOPAUSE hardware.
 */
7246 void
7247 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
7249 struct target_cmd *cmd;
7252 * If the card supports auto-access pause,
7253 * we can access the card directly regardless
7254 * of whether it is paused or not.
7256 if ((ahc->features & AHC_AUTOPAUSE) != 0)
7257 paused = TRUE;
/* Pull the latest DMA'd commands into host view before reading. */
7259 ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
7260 while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
7263 * Only advance through the queue if we
7264 * have the resources to process the command.
7266 if (ahc_handle_target_cmd(ahc, cmd) != 0)
7267 break;
/* Entry consumed: clear it and hand the slot back for DMA reuse. */
7269 cmd->cmd_valid = 0;
7270 ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
7271 ahc->shared_data_dmamap,
7272 ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
7273 sizeof(struct target_cmd),
7274 BUS_DMASYNC_PREREAD);
7275 ahc->tqinfifonext++;
7278 * Lazily update our position in the target mode incoming
7279 * command queue as seen by the sequencer.
7281 if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
7282 if ((ahc->features & AHC_HS_MAILBOX) != 0) {
7283 u_int hs_mailbox;
/* HS_MAILBOX hardware can take the update without pausing. */
7285 hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
7286 hs_mailbox &= ~HOST_TQINPOS;
7287 hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
7288 ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
7289 } else {
/* Otherwise the sequencer must be paused around the write. */
7290 if (!paused)
7291 ahc_pause(ahc);
7292 ahc_outb(ahc, KERNEL_TQINPOS,
7293 ahc->tqinfifonext & HOST_TQINPOS);
7294 if (!paused)
7295 ahc_unpause(ahc);
/*
 * ahc_handle_target_cmd() - convert one incoming target-mode command into
 * a CAM accept-TIO and deliver it to the peripheral driver.
 *
 * Routes the command to the enabled lun's lstate (or the black-hole
 * lstate for disabled luns), fills an ATIO from the raw command bytes
 * (tag, cdb length by group code, cdb), and completes it via xpt_done().
 * Returns 1 (leaving the queue entry unconsumed) when no ATIO is
 * available, 0 on success.
 */
7301 static int
7302 ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
7304 struct ahc_tmode_tstate *tstate;
7305 struct ahc_tmode_lstate *lstate;
7306 struct ccb_accept_tio *atio;
7307 uint8_t *byte;
7308 int initiator;
7309 int target;
7310 int lun;
/* Decode addressing from the raw command block. */
7312 initiator = SCSIID_TARGET(ahc, cmd->scsiid);
7313 target = SCSIID_OUR_ID(cmd->scsiid);
7314 lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);
7316 byte = cmd->bytes;
7317 tstate = ahc->enabled_targets[target];
7318 lstate = NULL;
7319 if (tstate != NULL)
7320 lstate = tstate->enabled_luns[lun];
7323 * Commands for disabled luns go to the black hole driver.
7325 if (lstate == NULL)
7326 lstate = ahc->black_hole;
7328 atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
7329 if (atio == NULL) {
/* No ATIO to carry the command; stall the queue until more arrive. */
7330 ahc->flags |= AHC_TQINFIFO_BLOCKED;
7332 * Wait for more ATIOs from the peripheral driver for this lun.
7334 if (bootverbose)
7335 printf("%s: ATIOs exhausted\n", ahc_name(ahc));
7336 return (1);
7337 } else
7338 ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
7339 #if 0
7340 printf("Incoming command from %d for %d:%d%s\n",
7341 initiator, target, lun,
7342 lstate == ahc->black_hole ? "(Black Holed)" : "");
7343 #endif
7344 SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
7346 if (lstate == ahc->black_hole) {
7347 /* Fill in the wildcards */
7348 atio->ccb_h.target_id = target;
7349 atio->ccb_h.target_lun = lun;
7353 * Package it up and send it off to
7354 * whomever has this lun enabled.
7356 atio->sense_len = 0;
7357 atio->init_id = initiator;
/* 0xFF in the first byte marks an untagged command. */
7358 if (byte[0] != 0xFF) {
7359 /* Tag was included */
7360 atio->tag_action = *byte++;
7361 atio->tag_id = *byte++;
7362 atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
7363 } else {
7364 atio->ccb_h.flags = 0;
7366 byte++;
7368 /* Okay. Now determine the cdb size based on the command code */
7369 switch (*byte >> CMD_GROUP_CODE_SHIFT) {
7370 case 0:
7371 atio->cdb_len = 6;
7372 break;
7373 case 1:
7374 case 2:
7375 atio->cdb_len = 10;
7376 break;
7377 case 4:
7378 atio->cdb_len = 16;
7379 break;
7380 case 5:
7381 atio->cdb_len = 12;
7382 break;
7383 case 3:
7384 default:
7385 /* Only copy the opcode. */
7386 atio->cdb_len = 1;
7387 printf("Reserved or VU command code type encountered\n");
7388 break;
7391 memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
7393 atio->ccb_h.status |= CAM_CDB_RECVD;
7395 if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
7397 * We weren't allowed to disconnect.
7398 * We're hanging on the bus until a
7399 * continue target I/O comes in response
7400 * to this accept tio.
7402 #if 0
7403 printf("Received Immediate Command %d:%d:%d - %p\n",
7404 initiator, target, lun, ahc->pending_device);
7405 #endif
7406 ahc->pending_device = lstate;
7407 ahc_freeze_ccb((union ccb *)atio);
7408 atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
7410 xpt_done((union ccb*)atio);
7411 return (0);
7414 #endif