/*
 * Inline routines shareable across OS platforms.
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2000-2001 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_inline.h#43 $
 */

#ifndef _AIC7XXX_INLINE_H_
#define _AIC7XXX_INLINE_H_

/************************* Sequencer Execution Control ************************/
static __inline void	ahc_pause_bug_fix(struct ahc_softc *ahc);
static __inline int	ahc_is_paused(struct ahc_softc *ahc);
static __inline void	ahc_pause(struct ahc_softc *ahc);
static __inline void	ahc_unpause(struct ahc_softc *ahc);

/*
 * Work around any chip bugs related to halting sequencer execution.
 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
 * reading a register that will set this signal and deassert it.
 * Without this workaround, if the chip is paused (by an interrupt or
 * a manual pause) while accessing SCB RAM, accesses to certain
 * registers will hang the system (infinite PCI retries).
 */
static __inline void
ahc_pause_bug_fix(struct ahc_softc *ahc)
{
	if ((ahc->features & AHC_ULTRA2) != 0)
		(void)ahc_inb(ahc, CCSCBCTL);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
static __inline int
ahc_is_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
static __inline void
ahc_pause(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahc_is_paused(ahc) == 0)
		;

	ahc_pause_bug_fix(ahc);
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
static __inline void
ahc_unpause(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT|SEQINT|BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}
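
/*
 * Illustrative sketch (not part of the driver): callers typically bracket
 * direct register accesses with ahc_pause()/ahc_unpause().  The SCBPTR
 * save/restore below is only an example of work done while paused, and
 * saved_scbptr is a hypothetical local variable.
 *
 *	ahc_pause(ahc);
 *	saved_scbptr = ahc_inb(ahc, SCBPTR);
 *	... inspect or modify sequencer state ...
 *	ahc_outb(ahc, SCBPTR, saved_scbptr);
 *	ahc_unpause(ahc);
 */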

/*********************** Untagged Transaction Routines ************************/
static __inline void	ahc_freeze_untagged_queues(struct ahc_softc *ahc);
static __inline void	ahc_release_untagged_queues(struct ahc_softc *ahc);

/*
 * Block our completion routine from starting the next untagged
 * transaction for this target or target lun.
 */
static __inline void
ahc_freeze_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0)
		ahc->untagged_queue_lock++;
}

/*
 * Allow the next untagged transaction for this target or target lun
 * to be executed.  We use a counting semaphore to allow the lock
 * to be acquired recursively.  Once the count drops to zero, the
 * transaction queues will be run.  (An illustrative usage sketch
 * follows the definition below.)
 */
static __inline void
ahc_release_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		ahc->untagged_queue_lock--;
		if (ahc->untagged_queue_lock == 0)
			ahc_run_untagged_queues(ahc);
	}
}
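
/*
 * Illustrative sketch (not part of the driver): because the lock is a
 * simple counter, freeze/release pairs may nest; the untagged queues only
 * run again when the final release drops the count to zero.
 *
 *	ahc_freeze_untagged_queues(ahc);
 *	ahc_freeze_untagged_queues(ahc);	(nested freeze, count is 2)
 *	ahc_release_untagged_queues(ahc);	(count is 1, still frozen)
 *	ahc_release_untagged_queues(ahc);	(count is 0, queues are run)
 */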

/************************** Memory mapping routines ***************************/
static __inline struct ahc_dma_seg *
			ahc_sg_bus_to_virt(struct scb *scb,
					   uint32_t sg_busaddr);
static __inline uint32_t
			ahc_sg_virt_to_bus(struct scb *scb,
					   struct ahc_dma_seg *sg);
static __inline uint32_t
			ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
static __inline void	ahc_sync_scb(struct ahc_softc *ahc,
				     struct scb *scb, int op);
static __inline void	ahc_sync_sglist(struct ahc_softc *ahc,
					struct scb *scb, int op);
static __inline uint32_t
			ahc_targetcmd_offset(struct ahc_softc *ahc,
					     u_int index);

static __inline struct ahc_dma_seg *
ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
{
	int sg_index;

	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
	/* sg_list_phys points to entry 1, not 0 */
	sg_index++;

	return (&scb->sg_list[sg_index]);
}

static __inline uint32_t
ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
{
	int sg_index;

	/* sg_list_phys points to entry 1, not 0 */
	sg_index = sg - &scb->sg_list[1];

	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
}
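
/*
 * Illustrative note (not part of the driver): the two helpers above are
 * inverses of one another for entries in an SCB's S/G list, e.g.
 *
 *	sg == ahc_sg_bus_to_virt(scb, ahc_sg_virt_to_bus(scb, sg));
 *
 * Both account for the fact that sg_list_phys corresponds to entry 1 of
 * sg_list rather than entry 0.
 */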

static __inline uint32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

static __inline void
ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
{
	ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
			ahc->scb_data->hscb_dmamap,
			/*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
			/*len*/sizeof(*scb->hscb), op);
}

static __inline void
ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
			/*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
				* sizeof(struct ahc_dma_seg),
			/*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
}

static __inline uint32_t
ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
{
	return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
}

/******************************** Debugging ***********************************/
static __inline char *ahc_name(struct ahc_softc *ahc);

static __inline char *
ahc_name(struct ahc_softc *ahc)
{
	return (ahc->name);
}

/*********************** Miscellaneous Support Functions **********************/

static __inline void	ahc_update_residual(struct ahc_softc *ahc,
					    struct scb *scb);
static __inline struct ahc_initiator_tinfo *
			ahc_fetch_transinfo(struct ahc_softc *ahc,
					    char channel, u_int our_id,
					    u_int remote_id,
					    struct ahc_tmode_tstate **tstate);
static __inline uint16_t
			ahc_inw(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outw(struct ahc_softc *ahc, u_int port,
				 u_int value);
static __inline uint32_t
			ahc_inl(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outl(struct ahc_softc *ahc, u_int port,
				 uint32_t value);
static __inline uint64_t
			ahc_inq(struct ahc_softc *ahc, u_int port);
static __inline void	ahc_outq(struct ahc_softc *ahc, u_int port,
				 uint64_t value);
static __inline struct scb *
			ahc_get_scb(struct ahc_softc *ahc);
static __inline void	ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline void	ahc_swap_with_next_hscb(struct ahc_softc *ahc,
						struct scb *scb);
static __inline void	ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
static __inline struct scsi_sense_data *
			ahc_get_sense_buf(struct ahc_softc *ahc,
					  struct scb *scb);
static __inline uint32_t
			ahc_get_sense_bufaddr(struct ahc_softc *ahc,
					      struct scb *scb);

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static __inline void
ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahc_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) != 0)
		ahc_calc_residual(ahc, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
static __inline struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct ahc_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahc->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
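
/*
 * Illustrative sketch (not part of the driver): a caller typically fetches
 * negotiation data for a particular initiator/target pair like this; the
 * devinfo fields shown are assumed to be filled in elsewhere:
 *
 *	struct ahc_initiator_tinfo *tinfo;
 *	struct ahc_tmode_tstate *tstate;
 *
 *	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
 *				    devinfo->our_scsiid,
 *				    devinfo->target, &tstate);
 */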

static __inline uint16_t
ahc_inw(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port+1) << 8) | ahc_inb(ahc, port));
}

static __inline void
ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
}

static __inline uint32_t
ahc_inl(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24));
}

static __inline void
ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
}

static __inline uint64_t
ahc_inq(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24)
	      | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
	      | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
	      | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
	      | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
}

static __inline void
ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
	ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
	ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
	ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
	ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
}
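
/*
 * Illustrative note (not part of the driver): the multi-byte accessors
 * above are built from single-byte ahc_inb()/ahc_outb() calls and order
 * the bytes least-significant first, so for a register range that simply
 * latches what is written, a write/read pair round-trips:
 *
 *	ahc_outw(ahc, port, 0x1234);
 *	value = ahc_inw(ahc, port);	(value == 0x1234)
 */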

/*
 * Get a free scb.  If there are none, see if we can allocate a new SCB.
 */
static __inline struct scb *
ahc_get_scb(struct ahc_softc *ahc)
{
	struct scb *scb;

	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
		ahc_alloc_scbs(ahc);
		scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scb == NULL)
			return (NULL);
	}
	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
static __inline void
ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FREE;
	hscb->control = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);

	/* Notify the OSM that a resource is now available. */
	ahc_platform_scb_free(ahc, scb);
}

static __inline struct scb *
ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
{
	struct scb *scb;

	scb = ahc->scb_data->scbindex[tag];
	if (scb != NULL)
		ahc_sync_scb(ahc, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	return (scb);
}

static __inline void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int  saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		q_hscb->shared_data.cdb_ptr =
		    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
			      + offsetof(struct hardware_scb, cdb32));
	}
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers. */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
static __inline void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Setup data "oddness".
	 */
	scb->hscb->lun &= LID;
	if (ahc_get_transfer_length(scb) & 0x1)
		scb->hscb->lun |= SCB_XFERLEN_ODD;

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}
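
/*
 * Illustrative sketch (not part of the driver): the typical SCB life cycle
 * as seen by an OSM, with error handling and HSCB setup omitted:
 *
 *	scb = ahc_get_scb(ahc);			(may return NULL)
 *	... fill in scb->hscb for the new command ...
 *	ahc_queue_scb(ahc, scb);
 *	... later, once the command completes ...
 *	ahc_free_scb(ahc, scb);
 */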

static __inline struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (&ahc->scb_data->sense[offset]);
}

static __inline uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (ahc->scb_data->sense_busaddr
	      + (offset * sizeof(struct scsi_sense_data)));
}

/************************** Interrupt Processing ******************************/
static __inline void	ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
static __inline void	ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
static __inline u_int	ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
static __inline int	ahc_intr(struct ahc_softc *ahc);

static __inline void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}

static __inline void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, 0),
				sizeof(struct target_cmd) * AHC_TMODE_CMDS,
				op);
	}
#endif
}

/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHC_RUN_QOUTFIFO 0x1
#define AHC_RUN_TQINFIFO 0x2
static __inline u_int
ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
{
	u_int retval;

	retval = 0;
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/ahc->qoutfifonext, /*len*/1,
			BUS_DMASYNC_POSTREAD);
	if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
		retval |= AHC_RUN_QOUTFIFO;
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0
	 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
			retval |= AHC_RUN_TQINFIFO;
	}
#endif
	return (retval);
}
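
/*
 * Illustrative sketch (not part of the driver): the return value is a
 * bitmask, so a caller that wants to service only what is pending could
 * test the individual flags:
 *
 *	u_int queues = ahc_check_cmdcmpltqueues(ahc);
 *
 *	if ((queues & AHC_RUN_QOUTFIFO) != 0)
 *		ahc_run_qoutfifo(ahc);
 */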

/*
 * Catch an interrupt from the adapter.
 */
static __inline int
ahc_intr(struct ahc_softc *ahc)
{
	u_int	intstat;

	if ((ahc->pause & INTEN) == 0) {
		/*
		 * Our interrupt is not enabled on the chip
		 * and may be disabled for re-entrancy reasons,
		 * so just return.  This is likely just a shared
		 * interrupt.
		 */
		return (0);
	}
	/*
	 * Instead of directly reading the interrupt status register,
	 * infer the cause of the interrupt by checking our in-core
	 * completion queues.  This avoids a costly PCI bus read in
	 * most cases.
	 */
	if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
	 && (ahc_check_cmdcmpltqueues(ahc) != 0))
		intstat = CMDCMPLT;
	else
		intstat = ahc_inb(ahc, INTSTAT);

	if ((intstat & INT_PEND) == 0) {
#if AHC_PCI_CONFIG > 0
		if (ahc->unsolicited_ints > 500) {
			ahc->unsolicited_ints = 0;
			if ((ahc->chip & AHC_PCI) != 0
			 && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
				ahc->bus_intr(ahc);
		}
#endif
		ahc->unsolicited_ints++;
		return (0);
	}
	ahc->unsolicited_ints = 0;

	if (intstat & CMDCMPLT) {
		ahc_outb(ahc, CLRINT, CLRCMDINT);

		/*
		 * Ensure that the chip sees that we've cleared
		 * this interrupt before we walk the output fifo.
		 * Otherwise, we may, due to posted bus writes,
		 * clear the interrupt after we finish the scan,
		 * and after the sequencer has added new entries
		 * and asserted the interrupt again.
		 */
		ahc_flush_device_writes(ahc);
		ahc_run_qoutfifo(ahc);
#ifdef AHC_TARGET_MODE
		if ((ahc->flags & AHC_TARGETROLE) != 0)
			ahc_run_tqinfifo(ahc, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & BRKADRINT) {
		ahc_handle_brkadrint(ahc);
	} else if ((intstat & (SEQINT|SCSIINT)) != 0) {

		ahc_pause_bug_fix(ahc);

		if ((intstat & SEQINT) != 0)
			ahc_handle_seqint(ahc, intstat);

		if ((intstat & SCSIINT) != 0)
			ahc_handle_scsiint(ahc, intstat);
	}
	return (1);
}
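
/*
 * Illustrative sketch (not part of the driver): an OSM interrupt handler
 * typically just calls ahc_intr() and reports, based on its return value,
 * whether the interrupt belonged to this controller.  The names osm_isr,
 * OUR_INTERRUPT, and NOT_OURS below are hypothetical:
 *
 *	osm_isr(void *arg)
 *	{
 *		struct ahc_softc *ahc = arg;
 *
 *		return (ahc_intr(ahc) != 0 ? OUR_INTERRUPT : NOT_OURS);
 *	}
 */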

#endif  /* _AIC7XXX_INLINE_H_ */