/*	$NetBSD: aic79xx_osm.c,v 1.29 2009/09/26 14:44:10 tsutsui Exp $	*/

/*
 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
 *
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.29 2009/09/26 14:44:10 tsutsui Exp $");

#include <dev/ic/aic79xx_osm.h>
#include <dev/ic/aic79xx_inline.h>

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif
static int	ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
			  void *addr, int flag, struct proc *p);
static void	ahd_action(struct scsipi_channel *chan,
			   scsipi_adapter_req_t req, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
			   int nsegments);
static int	ahd_poll(struct ahd_softc *ahd, int wait);
static void	ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
			   struct scb *scb);
static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);

static bool	ahd_pmf_suspend(device_t, pmf_qual_t);
static bool	ahd_pmf_resume(device_t, pmf_qual_t);
static bool	ahd_pmf_shutdown(device_t, int);
/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{

        ahd_controller_info(ahd, ahd_info, sizeof(ahd_info));
        printf("%s: %s\n", ahd_name(ahd), ahd_info);

        ahd->sc_adapter.adapt_dev = ahd->sc_dev;
        ahd->sc_adapter.adapt_nchannels = 1;

        ahd->sc_adapter.adapt_openings = ahd->scb_data.numscbs - 1;
        ahd->sc_adapter.adapt_max_periph = 32;

        ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
        ahd->sc_adapter.adapt_minphys = ahd_minphys;
        ahd->sc_adapter.adapt_request = ahd_action;

        ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
        ahd->sc_channel.chan_bustype = &scsi_bustype;
        ahd->sc_channel.chan_channel = 0;
        ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
        ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
        ahd->sc_channel.chan_id = ahd->our_id;
        ahd->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

        ahd->sc_child = config_found(ahd->sc_dev, &ahd->sc_channel, scsiprint);

        ahd_intr_enable(ahd, TRUE);

        if (ahd->flags & AHD_RESET_BUS_A)
                ahd_reset_channel(ahd, 'A', TRUE);

        if (!pmf_device_register1(ahd->sc_dev,
            ahd_pmf_suspend, ahd_pmf_resume, ahd_pmf_shutdown))
                aprint_error_dev(ahd->sc_dev,
                    "couldn't establish power handler\n");

        return (1);
}
static bool
ahd_pmf_suspend(device_t dev, pmf_qual_t qual)
{
        struct ahd_softc *sc = device_private(dev);

        return (ahd_suspend(sc) == 0);
}

static bool
ahd_pmf_resume(device_t dev, pmf_qual_t qual)
{
        struct ahd_softc *sc = device_private(dev);

        return (ahd_resume(sc) == 0);
}
static bool
ahd_pmf_shutdown(device_t dev, int howto)
{
        struct ahd_softc *sc = device_private(dev);

        /* Disable all interrupt sources by resetting the controller */
        ahd_shutdown(sc);

        return true;
}
static int
ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
          void *addr, int flag, struct proc *p)
{
        struct ahd_softc *ahd;

        ahd = device_private(channel->chan_adapter->adapt_dev);

        ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
/*
 * Catch an interrupt from the adapter
 */
int
ahd_platform_intr(void *arg)
{
        struct ahd_softc *ahd;

        ahd = arg;
        printf("%s: ahd_platform_intr\n", ahd_name(ahd));

        return ahd_intr(ahd);
}
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
        struct scsipi_xfer *xs;
        struct scsipi_periph *periph;

        LIST_REMOVE(scb, pending_links);

        xs = scb->xs;
        periph = xs->xs_periph;

        callout_stop(&scb->xs->xs_callout);

        if (xs->datalen) {
                int op;

                if (xs->xs_control & XS_CTL_DATA_IN)
                        op = BUS_DMASYNC_POSTREAD;
                else
                        op = BUS_DMASYNC_POSTWRITE;

                bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
                    scb->dmamap->dm_mapsize, op);
                bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
        }

        /*
         * If the recovery SCB completes, we have to be
         * out of our timeout.
         */
        if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
                struct scb *list_scb;

                /*
                 * We were able to complete the command successfully,
                 * so reinstate the timeouts for all other pending
                 * commands.
                 */
                LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
                        struct scsipi_xfer *txs = list_scb->xs;

                        if (!(txs->xs_control & XS_CTL_POLL)) {
                                callout_reset(&txs->xs_callout,
                                    (txs->timeout > 1000000) ?
                                    (txs->timeout / 1000) * hz :
                                    (txs->timeout * hz) / 1000,
                                    ahd_timeout, list_scb);
                        }
                }

                if (ahd_get_transaction_status(scb) != XS_NOERROR)
                        ahd_set_transaction_status(scb, XS_TIMEOUT);
                scsipi_printaddr(xs->xs_periph);
                printf("%s: no longer in timeout, status = %x\n",
                    ahd_name(ahd), xs->status);
        }

        if (xs->error != XS_NOERROR) {
                /* Don't clobber any existing error state */
        } else if ((xs->status == SCSI_STATUS_BUSY) ||
            (xs->status == SCSI_STATUS_QUEUE_FULL)) {
                ahd_set_transaction_status(scb, XS_BUSY);
                printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
                    ahd_name(ahd), SCB_GET_TARGET(ahd, scb),
                    SCB_GET_LUN(scb), SCB_GET_TAG(scb));
        } else if ((scb->flags & SCB_SENSE) != 0) {
                /*
                 * We performed autosense retrieval.
                 *
                 * Zero the sense data before having
                 * the drive fill it.  The SCSI spec mandates
                 * that any untransferred data should be
                 * assumed to be zero.  Complete the 'bounce'
                 * of sense information through buffers accessible
                 * via bus-space by copying it into the client's
                 * sense buffer.
                 */
                memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
                memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
                    sizeof(struct scsi_sense_data));

                ahd_set_transaction_status(scb, XS_SENSE);
        } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
                struct scsi_status_iu_header *siu;
                u_int sense_len;
                int i;

                /*
                 * Copy only the sense data into the provided buffer.
                 */
                siu = (struct scsi_status_iu_header *)scb->sense_data;
                sense_len = MIN(scsi_4btoul(siu->sense_length),
                    sizeof(xs->sense.scsi_sense));
                memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
                memcpy(&xs->sense.scsi_sense,
                    scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);

                printf("Copied %d bytes of sense data offset %d:", sense_len,
                    SIU_SENSE_OFFSET(siu));
                for (i = 0; i < sense_len; i++)
                        printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
                printf("\n");

                ahd_set_transaction_status(scb, XS_SENSE);
        }

        if (scb->flags & SCB_FREEZE_QUEUE) {
                scsipi_periph_thaw(periph, 1);
                scb->flags &= ~SCB_FREEZE_QUEUE;
        }

        if (scb->flags & SCB_REQUEUE)
                ahd_set_transaction_status(scb, XS_REQUEUE);

        ahd_free_scb(ahd, scb);
        scsipi_done(xs);
}
static void
ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
        struct ahd_softc *ahd;
        struct ahd_initiator_tinfo *tinfo;
        struct ahd_tmode_tstate *tstate;

        ahd = device_private(chan->chan_adapter->adapt_dev);

        switch (req) {
        case ADAPTER_REQ_RUN_XFER:
        {
                struct scsipi_xfer *xs;
                struct scsipi_periph *periph;
                struct scb *scb;
                struct hardware_scb *hscb;
                u_int target_id;
                u_int our_id;
                u_int col_idx;
                char channel;

                xs = arg;
                periph = xs->xs_periph;

                SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));

                target_id = periph->periph_target;
                our_id = ahd->our_id;
                channel = (chan->chan_channel == 1) ? 'B' : 'A';

                tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
                    target_id, &tstate);

                if (xs->xs_tag_type != 0 ||
                    (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
                        col_idx = AHD_NEVER_COL_IDX;
                else
                        col_idx = AHD_BUILD_COL_IDX(target_id,
                            periph->periph_lun);

                if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
                        xs->error = XS_RESOURCE_SHORTAGE;
                        scsipi_done(xs);
                        return;
                }

                SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));

                scb->xs = xs;

                /*
                 * Put all the arguments for the xfer in the scb
                 */
                hscb = scb->hscb;

                hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
                hscb->lun = periph->periph_lun;
                if (xs->xs_control & XS_CTL_RESET) {
                        scb->flags |= SCB_DEVICE_RESET;
                        hscb->control |= MK_MESSAGE;
                        hscb->task_management = SIU_TASKMGMT_LUN_RESET;
                        ahd_execute_scb(scb, NULL, 0);
                } else {
                        hscb->task_management = 0;
                }

                ahd_setup_data(ahd, xs, scb);
                break;
        }

        case ADAPTER_REQ_GROW_RESOURCES:
                printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
                chan->chan_adapter->adapt_openings += ahd_alloc_scbs(ahd);
                if (ahd->scb_data.numscbs >= AHD_SCB_MAX_ALLOC)
                        chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
                break;

        case ADAPTER_REQ_SET_XFER_MODE:
        {
                struct scsipi_xfer_mode *xm = arg;
                struct ahd_devinfo devinfo;
                int target_id, our_id, first;
                u_int width;
                char channel;
                u_int ppr_options = 0, period, offset;
                uint16_t old_autoneg;

                target_id = xm->xm_target;
                our_id = chan->chan_id;
                channel = 'A';
                tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
                    &tstate);
                ahd_compile_devinfo(&devinfo, our_id, target_id,
                    0, channel, ROLE_INITIATOR);

                old_autoneg = tstate->auto_negotiate;

                /*
                 * XXX since the period and offset are not provided here,
                 * fake things by forcing a renegotiation using the user
                 * settings if this is called for the first time (i.e.
                 * during probe). Also, cap various values at the user
                 * values, assuming that the user set it up that way.
                 */
                if (ahd->inited_target[target_id] == 0) {
                        period = tinfo->user.period;
                        offset = tinfo->user.offset;
                        ppr_options = tinfo->user.ppr_options;
                        width = tinfo->user.width;
                        tstate->tagenable |=
                            (ahd->user_tagenable & devinfo.target_mask);
                        tstate->discenable |=
                            (ahd->user_discenable & devinfo.target_mask);
                        ahd->inited_target[target_id] = 1;
                        first = 1;
                } else
                        first = 0;

                if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
                        width = MSG_EXT_WDTR_BUS_16_BIT;
                else
                        width = MSG_EXT_WDTR_BUS_8_BIT;

                ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
                if (width > tinfo->user.width)
                        width = tinfo->user.width;
                ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);

                if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
                        period = 0;
                        offset = 0;
                        ppr_options = 0;
                }

                if ((xm->xm_mode & PERIPH_CAP_DT) &&
                    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
                        ppr_options |= MSG_EXT_PPR_DT_REQ;
                else
                        ppr_options &= ~MSG_EXT_PPR_DT_REQ;

                if ((tstate->discenable & devinfo.target_mask) == 0 ||
                    (tstate->tagenable & devinfo.target_mask) == 0)
                        ppr_options &= ~MSG_EXT_PPR_IU_REQ;

                if ((xm->xm_mode & PERIPH_CAP_TQING) &&
                    (ahd->user_tagenable & devinfo.target_mask))
                        tstate->tagenable |= devinfo.target_mask;
                else
                        tstate->tagenable &= ~devinfo.target_mask;

                ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
                ahd_validate_offset(ahd, NULL, period, &offset,
                    MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);

                if (ppr_options != 0
                    && tinfo->user.transport_version >= 3) {
                        tinfo->goal.transport_version =
                            tinfo->user.transport_version;
                        tinfo->curr.transport_version =
                            tinfo->user.transport_version;
                }

                ahd_set_syncrate(ahd, &devinfo, period, offset,
                    ppr_options, AHD_TRANS_GOAL, FALSE);

                /*
                 * If this is the first request, and no negotiation is
                 * needed, just confirm the state to the scsipi layer,
                 * so that it can print a message.
                 */
                if (old_autoneg == tstate->auto_negotiate && first) {
                        xm->xm_mode = 0;
                        xm->xm_period = tinfo->curr.period;
                        xm->xm_offset = tinfo->curr.offset;
                        if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
                                xm->xm_mode |= PERIPH_CAP_WIDE16;
                        if (tinfo->curr.period)
                                xm->xm_mode |= PERIPH_CAP_SYNC;
                        if (tstate->tagenable & devinfo.target_mask)
                                xm->xm_mode |= PERIPH_CAP_TQING;
                        if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
                                xm->xm_mode |= PERIPH_CAP_DT;
                        scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
                }
                break;
        }
        }
}
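
/*
 * Finish setting up an SCB once its data buffer (if any) has been DMA
 * mapped: build the S/G list, apply the per-target disconnect, tag and
 * negotiation settings, start the command timeout and queue the SCB to
 * the controller.  When XS_CTL_POLL is set, completion is polled for
 * here instead of waiting for an interrupt.
 */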
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
        struct scb *scb;
        struct scsipi_xfer *xs;
        struct ahd_softc *ahd;
        struct ahd_initiator_tinfo *tinfo;
        struct ahd_tmode_tstate *tstate;
        u_int mask;

        scb = arg;
        xs = scb->xs;
        ahd = device_private(
            xs->xs_periph->periph_channel->chan_adapter->adapt_dev);

        if (nsegments != 0) {
                void *sg;
                int op;
                u_int i;

                ahd_setup_data_scb(ahd, scb);

                /* Copy the segments into our SG list */
                for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

                        sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
                            dm_segs->ds_len, /*last*/i == 1);
                        dm_segs++;
                }

                if (xs->xs_control & XS_CTL_DATA_IN)
                        op = BUS_DMASYNC_PREREAD;
                else
                        op = BUS_DMASYNC_PREWRITE;

                bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
                    scb->dmamap->dm_mapsize, op);
        }

        /*
         * Last time we need to check if this SCB needs to
         * be aborted.
         */
        if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
                if (nsegments != 0)
                        bus_dmamap_unload(ahd->parent_dmat,
                            scb->dmamap);
                ahd_free_scb(ahd, scb);
                scsipi_done(xs);
                return;
        }

        tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
            SCSIID_OUR_ID(scb->hscb->scsiid),
            SCSIID_TARGET(ahd, scb->hscb->scsiid),
            &tstate);

        mask = SCB_GET_TARGET_MASK(ahd, scb);

        if ((tstate->discenable & mask) != 0)
                scb->hscb->control |= DISCENB;

        if ((tstate->tagenable & mask) != 0)
                scb->hscb->control |= xs->xs_tag_type|TAG_ENB;

        if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
                scb->flags |= SCB_PACKETIZED;
                if (scb->hscb->task_management != 0)
                        scb->hscb->control &= ~MK_MESSAGE;
        }

#if 0   /* This looks like it makes sense at first, but it can loop */
        if ((xs->xs_control & XS_CTL_DISCOVERY) &&
            (tinfo->goal.width != 0
             || tinfo->goal.period != 0
             || tinfo->goal.ppr_options != 0)) {
                scb->flags |= SCB_NEGOTIATE;
                scb->hscb->control |= MK_MESSAGE;
        } else
#endif
        if ((tstate->auto_negotiate & mask) != 0) {
                scb->flags |= SCB_AUTO_NEGOTIATE;
                scb->hscb->control |= MK_MESSAGE;
        }

        LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

        scb->flags |= SCB_ACTIVE;

        if (!(xs->xs_control & XS_CTL_POLL)) {
                callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
                    (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
                    ahd_timeout, scb);
        }

        if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
                /* Define a mapping from our tag to the SCB. */
                ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
                ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
                ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
        } else
                ahd_queue_scb(ahd, scb);

        if (!(xs->xs_control & XS_CTL_POLL)) {
                return;
        }

        /*
         * If we can't use interrupts, poll for completion
         */
        SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));

        do {
                if (ahd_poll(ahd, xs->timeout)) {
                        if (!(xs->xs_control & XS_CTL_SILENT))
                                printf("cmd fail\n");
                        ahd_timeout(scb);
                        break;
                }
        } while (!(xs->xs_status & XS_STS_DONE));
}
static int
ahd_poll(struct ahd_softc *ahd, int wait)
{

        while (--wait) {
                DELAY(1000);
                if (ahd_inb(ahd, INTSTAT) & INT_PEND)
                        break;
        }

        if (wait == 0) {
                printf("%s: board is not responding\n", ahd_name(ahd));
                return (EIO);
        }

        ahd_intr(ahd);
        return (0);
}
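
/*
 * Copy the CDB into the hardware SCB and DMA map the data buffer, then
 * hand the command to ahd_execute_scb() (with or without segments,
 * depending on whether there is a data transfer).
 */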
static void
ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
               struct scb *scb)
{
        struct hardware_scb *hscb;

        hscb = scb->hscb;
        xs->resid = xs->status = 0;

        hscb->cdb_len = xs->cmdlen;
        if (hscb->cdb_len > MAX_CDB_LEN) {
                /*
                 * Should CAM start to support CDB sizes
                 * greater than 16 bytes, we could use
                 * the sense buffer to store the CDB.
                 */
                ahd_set_transaction_status(scb,
                    XS_DRIVER_STUFFUP);
                ahd_free_scb(ahd, scb);
                scsipi_done(xs);
                return;
        }
        memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);

        /* Only use S/G if there is a transfer */
        if (xs->datalen) {
                int error;

                error = bus_dmamap_load(ahd->parent_dmat,
                    scb->dmamap, xs->data,
                    xs->datalen, NULL,
                    ((xs->xs_control & XS_CTL_NOSLEEP) ?
                     BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
                    BUS_DMA_STREAMING |
                    ((xs->xs_control & XS_CTL_DATA_IN) ?
                     BUS_DMA_READ : BUS_DMA_WRITE));
                if (error) {
                        printf("%s: in ahd_setup_data(): bus_dmamap_load() "
                               "= %d\n",
                               ahd_name(ahd), error);
                        xs->error = XS_RESOURCE_SHORTAGE;
                        scsipi_done(xs);
                        return;
                }
                ahd_execute_scb(scb,
                    scb->dmamap->dm_segs,
                    scb->dmamap->dm_nsegs);
        } else {
                ahd_execute_scb(scb, NULL, 0);
        }
}
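
/*
 * Per-command timeout handler, armed with callout_reset() when a command
 * is queued.  xs->timeout is in milliseconds; note the two conversions to
 * ticks above, presumably split at 1000000 ms to avoid overflowing the
 * intermediate (timeout * hz) product.
 */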
void
ahd_timeout(void *arg)
{
        struct scb *scb;
        struct ahd_softc *ahd;
        ahd_mode_state saved_modes;

        scb = arg;
        ahd = scb->ahd_softc;

        printf("%s: ahd_timeout\n", ahd_name(ahd));

        ahd_pause_and_flushwork(ahd);
        saved_modes = ahd_save_modes(ahd);

        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
        ahd_outb(ahd, SCSISIGO, ACKO);
        ahd_outb(ahd, SCSISIGO, 0);
        printf("clearing Ack\n");
        ahd_restore_modes(ahd, saved_modes);

        if ((scb->flags & SCB_ACTIVE) == 0) {
                /* Previous timeout took care of me already */
                printf("%s: Timedout SCB already complete. "
                       "Interrupts may not be functioning.\n", ahd_name(ahd));
                ahd_unpause(ahd);
                return;
        }

        ahd_print_path(ahd, scb);
        printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
        ahd_dump_card_state(ahd);
        ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
            /*initiate reset*/TRUE);
}
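
/*
 * Allocate and release the NetBSD-specific per-controller state hung off
 * ahd->platform_data.
 */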
int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
        ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
            M_NOWAIT /*| M_ZERO*/);
        if (ahd->platform_data == NULL)
                return (ENOMEM);

        memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));

        return (0);
}

void
ahd_platform_free(struct ahd_softc *ahd)
{
        free(ahd->platform_data, M_DEVBUF);
}

int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
        /* We don't sort softcs under NetBSD so report equal always */
        return (0);
}
int
ahd_detach(struct ahd_softc *ahd, int flags)
{
        int rv = 0;

        if (ahd->sc_child != NULL)
                rv = config_detach(ahd->sc_child, flags);

        pmf_device_deregister(ahd->sc_dev);

        return (rv);
}
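
/*
 * Record whether tagged queueing is enabled for the device described by
 * "devinfo"; ahd_execute_scb() consults tstate->tagenable when building
 * each SCB.
 */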
void
ahd_platform_set_tags(struct ahd_softc *ahd,
                      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
        struct ahd_tmode_tstate *tstate;

        ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
            devinfo->target, &tstate);

        if (alg != AHD_QUEUE_NONE)
                tstate->tagenable |= devinfo->target_mask;
        else
                tstate->tagenable &= ~devinfo->target_mask;
}
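
/*
 * Forward asynchronous events from the core (completed transfer
 * negotiation, bus resets) to the scsipi layer via scsipi_async_event().
 */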
void
ahd_send_async(struct ahd_softc *ahd, char channel, u_int target, u_int lun,
               ac_code code, void *opt_arg)
{
        struct ahd_tmode_tstate *tstate;
        struct ahd_initiator_tinfo *tinfo;
        struct ahd_devinfo devinfo;
        struct scsipi_channel *chan;
        struct scsipi_xfer_mode xm;

        if (channel != 'A')
                panic("ahd_send_async: not channel A");

        chan = &ahd->sc_channel;

        switch (code) {
        case AC_TRANSFER_NEG:
                tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id, target,
                    &tstate);
                ahd_compile_devinfo(&devinfo, ahd->our_id, target, lun,
                    channel, ROLE_UNKNOWN);
                /*
                 * Don't bother if negotiating. XXX?
                 */
                if (tinfo->curr.period != tinfo->goal.period
                    || tinfo->curr.width != tinfo->goal.width
                    || tinfo->curr.offset != tinfo->goal.offset
                    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
                        break;

                xm.xm_target = target;
                xm.xm_mode = 0;
                xm.xm_period = tinfo->curr.period;
                xm.xm_offset = tinfo->curr.offset;
                if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
                        xm.xm_mode |= PERIPH_CAP_DT;
                if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
                        xm.xm_mode |= PERIPH_CAP_WIDE16;
                if (tinfo->curr.period)
                        xm.xm_mode |= PERIPH_CAP_SYNC;
                if (tstate->tagenable & devinfo.target_mask)
                        xm.xm_mode |= PERIPH_CAP_TQING;
                scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
                break;
        case AC_BUS_RESET:
                scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
                break;
        default:
                break;
        }
}