1 /* $NetBSD: dpt.c,v 1.62 2008/06/08 12:43:51 tsutsui Exp $ */
4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
9 * Aerospace Simulation Facility, NASA Ames Research Center.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
34 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
35 * Copyright (c) 2000 Adaptec Corporation
36 * All rights reserved.
38 * TERMS AND CONDITIONS OF USE
40 * Redistribution and use in source form, with or without modification, are
41 * permitted provided that redistributions of source code must retain the
42 * above copyright notice, this list of conditions and the following disclaimer.
44 * This software is provided `as is' by Adaptec and any express or implied
45 * warranties, including, but not limited to, the implied warranties of
46 * merchantability and fitness for a particular purpose, are disclaimed. In no
47 * event shall Adaptec be liable for any direct, indirect, incidental, special,
48 * exemplary or consequential damages (including, but not limited to,
49 * procurement of substitute goods or services; loss of use, data, or profits;
50 * or business interruptions) however caused and on any theory of liability,
51 * whether in contract, strict liability, or tort (including negligence or
52 * otherwise) arising in any way out of the use of this driver software, even
53 * if advised of the possibility of such damage.
57 * Portions of this code fall under the following copyright:
59 * Originally written by Julian Elischer (julian@tfs.com)
60 * for TRW Financial Systems for use under the MACH(2.5) operating system.
62 * TRW Financial Systems, in accordance with their agreement with Carnegie
63 * Mellon University, makes this software available to CMU to distribute
64 * or use in any manner that they see fit as long as this message is kept with
65 * the software. For this reason TFS also grants any other persons or
66 * organisations permission to use or modify this software.
68 * TFS supplies this software to be publicly redistributed
69 * on the understanding that TFS is not responsible for the correct
70 * functioning of this software in any circumstances.
73 #include <sys/cdefs.h>
74 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.62 2008/06/08 12:43:51 tsutsui Exp $");
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/device.h>
79 #include <sys/queue.h>
81 #include <sys/endian.h>
83 #include <sys/kauth.h>
86 #include <uvm/uvm_extern.h>
90 #include <machine/pio.h>
91 #include <machine/cputypes.h>
94 #include <dev/scsipi/scsi_all.h>
95 #include <dev/scsipi/scsipi_all.h>
96 #include <dev/scsipi/scsiconf.h>
98 #include <dev/ic/dptreg.h>
99 #include <dev/ic/dptvar.h>
101 #include <dev/i2o/dptivar.h>
104 #define DPRINTF(x) printf x
109 #define dpt_inb(x, o) \
110 bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
111 #define dpt_outb(x, o, d) \
112 bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
114 static const char * const dpt_cname
[] = {
115 "3334", "SmartRAID IV",
116 "3332", "SmartRAID IV",
117 "2144", "SmartCache IV",
118 "2044", "SmartCache IV",
119 "2142", "SmartCache IV",
120 "2042", "SmartCache IV",
121 "2041", "SmartCache IV",
122 "3224", "SmartRAID III",
123 "3222", "SmartRAID III",
124 "3021", "SmartRAID III",
125 "2124", "SmartCache III",
126 "2024", "SmartCache III",
127 "2122", "SmartCache III",
128 "2022", "SmartCache III",
129 "2021", "SmartCache III",
130 "2012", "SmartCache Plus",
131 "2011", "SmartCache Plus",
135 static void *dpt_sdh
;
137 dev_type_open(dptopen
);
138 dev_type_ioctl(dptioctl
);
140 const struct cdevsw dpt_cdevsw
= {
141 dptopen
, nullclose
, noread
, nowrite
, dptioctl
,
142 nostop
, notty
, nopoll
, nommap
, nokqfilter
, D_OTHER
,
145 extern struct cfdriver dpt_cd
;
147 static struct dpt_sig dpt_sig
= {
148 { 'd', 'P', 't', 'S', 'i', 'G'},
152 #elif defined(powerpc)
156 #elif defined(__mips__)
158 #elif defined(sparc64)
164 PROC_386
| PROC_486
| PROC_PENTIUM
| PROC_SEXIUM
,
171 OS_FREE_BSD
, /* XXX */
183 "" /* Will be filled later */
186 static void dpt_ccb_abort(struct dpt_softc
*, struct dpt_ccb
*);
187 static void dpt_ccb_done(struct dpt_softc
*, struct dpt_ccb
*);
188 static int dpt_ccb_map(struct dpt_softc
*, struct dpt_ccb
*);
189 static int dpt_ccb_poll(struct dpt_softc
*, struct dpt_ccb
*);
190 static void dpt_ccb_unmap(struct dpt_softc
*, struct dpt_ccb
*);
191 static int dpt_cmd(struct dpt_softc
*, struct dpt_ccb
*, int, int);
192 static void dpt_ctlrinfo(struct dpt_softc
*, struct dpt_eata_ctlrinfo
*);
193 static void dpt_hba_inquire(struct dpt_softc
*, struct eata_inquiry_data
**);
194 static void dpt_minphys(struct buf
*);
195 static int dpt_passthrough(struct dpt_softc
*, struct eata_ucp
*,
197 static void dpt_scsipi_request(struct scsipi_channel
*,
198 scsipi_adapter_req_t
, void *);
199 static void dpt_shutdown(void *);
200 static void dpt_sysinfo(struct dpt_softc
*, struct dpt_sysinfo
*);
201 static int dpt_wait(struct dpt_softc
*, u_int8_t
, u_int8_t
, int);
203 static inline struct dpt_ccb
*dpt_ccb_alloc(struct dpt_softc
*);
204 static inline void dpt_ccb_free(struct dpt_softc
*, struct dpt_ccb
*);
206 static inline struct dpt_ccb
*
207 dpt_ccb_alloc(struct dpt_softc
*sc
)
213 ccb
= SLIST_FIRST(&sc
->sc_ccb_free
);
214 SLIST_REMOVE_HEAD(&sc
->sc_ccb_free
, ccb_chain
);
221 dpt_ccb_free(struct dpt_softc
*sc
, struct dpt_ccb
*ccb
)
226 ccb
->ccb_savesp
= NULL
;
228 SLIST_INSERT_HEAD(&sc
->sc_ccb_free
, ccb
, ccb_chain
);
233 * Handle an interrupt from the HBA.
236 dpt_intr(void *cookie
)
238 struct dpt_softc
*sc
;
250 * HBA might have interrupted while we were dealing with the
251 * last completed command, since we ACK before we deal; keep
254 if ((dpt_inb(sc
, HA_AUX_STATUS
) & HA_AUX_INTR
) == 0)
258 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, sc
->sc_stpoff
,
259 sizeof(struct eata_sp
), BUS_DMASYNC_POSTREAD
);
261 /* Might have looped before HBA can reset HBA_AUX_INTR. */
262 if (sp
->sp_ccbid
== -1) {
265 if ((dpt_inb(sc
, HA_AUX_STATUS
) & HA_AUX_INTR
) == 0)
268 printf("%s: no status\n", device_xname(&sc
->sc_dv
));
270 /* Re-sync DMA map */
271 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
,
272 sc
->sc_stpoff
, sizeof(struct eata_sp
),
273 BUS_DMASYNC_POSTREAD
);
276 /* Make sure CCB ID from status packet is realistic. */
277 if ((u_int
)sp
->sp_ccbid
>= sc
->sc_nccbs
) {
278 printf("%s: bogus status (returned CCB id %d)\n",
279 device_xname(&sc
->sc_dv
), sp
->sp_ccbid
);
281 /* Ack the interrupt */
283 junk
= dpt_inb(sc
, HA_STATUS
);
287 /* Sync up DMA map and cache cmd status. */
288 ccb
= sc
->sc_ccbs
+ sp
->sp_ccbid
;
290 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, CCB_OFF(sc
, ccb
),
291 sizeof(struct dpt_ccb
), BUS_DMASYNC_POSTWRITE
);
293 ccb
->ccb_hba_status
= sp
->sp_hba_status
& 0x7f;
294 ccb
->ccb_scsi_status
= sp
->sp_scsi_status
;
295 if (ccb
->ccb_savesp
!= NULL
)
296 memcpy(ccb
->ccb_savesp
, sp
, sizeof(*sp
));
299 * Ack the interrupt and process the CCB. If this
300 * is a private CCB it's up to dpt_ccb_poll() to
304 ccb
->ccb_flg
|= CCB_INTR
;
305 junk
= dpt_inb(sc
, HA_STATUS
);
306 if ((ccb
->ccb_flg
& CCB_PRIVATE
) == 0)
307 dpt_ccb_done(sc
, ccb
);
308 else if ((ccb
->ccb_flg
& CCB_WAIT
) != 0)
316 * Initialize and attach the HBA. This is the entry point from bus
317 * specific probe-and-attach code.
320 dpt_init(struct dpt_softc
*sc
, const char *intrstr
)
322 struct scsipi_adapter
*adapt
;
323 struct scsipi_channel
*chan
;
324 struct eata_inquiry_data
*ei
;
325 int i
, j
, rv
, rseg
, maxchannel
, maxtarget
, mapsize
;
326 bus_dma_segment_t seg
;
329 char model
[__arraycount(ei
->ei_model
) + __arraycount(ei
->ei_suffix
) + 1];
330 char vendor
[__arraycount(ei
->ei_vendor
) + 1];
333 snprintf(dpt_sig
.dsDescription
, sizeof(dpt_sig
.dsDescription
),
334 "NetBSD %s DPT driver", osrelease
);
337 * Allocate the CCB/status packet/scratch DMA map and load.
340 min(be16toh(*(int16_t *)ec
->ec_queuedepth
), DPT_MAX_CCBS
);
341 sc
->sc_stpoff
= sc
->sc_nccbs
* sizeof(struct dpt_ccb
);
342 sc
->sc_scroff
= sc
->sc_stpoff
+ sizeof(struct eata_sp
);
343 mapsize
= sc
->sc_nccbs
* sizeof(struct dpt_ccb
) +
344 DPT_SCRATCH_SIZE
+ sizeof(struct eata_sp
);
346 if ((rv
= bus_dmamem_alloc(sc
->sc_dmat
, mapsize
,
347 PAGE_SIZE
, 0, &seg
, 1, &rseg
, BUS_DMA_NOWAIT
)) != 0) {
348 aprint_error_dev(&sc
->sc_dv
, "unable to allocate CCBs, rv = %d\n", rv
);
352 if ((rv
= bus_dmamem_map(sc
->sc_dmat
, &seg
, rseg
, mapsize
,
353 (void **)&sc
->sc_ccbs
, BUS_DMA_NOWAIT
|BUS_DMA_COHERENT
)) != 0) {
354 aprint_error_dev(&sc
->sc_dv
, "unable to map CCBs, rv = %d\n",
359 if ((rv
= bus_dmamap_create(sc
->sc_dmat
, mapsize
,
360 mapsize
, 1, 0, BUS_DMA_NOWAIT
, &sc
->sc_dmamap
)) != 0) {
361 aprint_error_dev(&sc
->sc_dv
, "unable to create CCB DMA map, rv = %d\n", rv
);
365 if ((rv
= bus_dmamap_load(sc
->sc_dmat
, sc
->sc_dmamap
,
366 sc
->sc_ccbs
, mapsize
, NULL
, BUS_DMA_NOWAIT
)) != 0) {
367 aprint_error_dev(&sc
->sc_dv
, "unable to load CCB DMA map, rv = %d\n", rv
);
371 sc
->sc_stp
= (struct eata_sp
*)((char *)sc
->sc_ccbs
+ sc
->sc_stpoff
);
372 sc
->sc_stppa
= sc
->sc_dmamap
->dm_segs
[0].ds_addr
+ sc
->sc_stpoff
;
373 sc
->sc_scr
= (char *)sc
->sc_ccbs
+ sc
->sc_scroff
;
374 sc
->sc_scrpa
= sc
->sc_dmamap
->dm_segs
[0].ds_addr
+ sc
->sc_scroff
;
375 sc
->sc_stp
->sp_ccbid
= -1;
380 SLIST_INIT(&sc
->sc_ccb_free
);
381 memset(sc
->sc_ccbs
, 0, sizeof(struct dpt_ccb
) * sc
->sc_nccbs
);
383 for (i
= 0, ccb
= sc
->sc_ccbs
; i
< sc
->sc_nccbs
; i
++, ccb
++) {
384 rv
= bus_dmamap_create(sc
->sc_dmat
, DPT_MAX_XFER
,
385 DPT_SG_SIZE
, DPT_MAX_XFER
, 0,
386 BUS_DMA_NOWAIT
| BUS_DMA_ALLOCNOW
,
387 &ccb
->ccb_dmamap_xfer
);
389 aprint_error_dev(&sc
->sc_dv
, "can't create ccb dmamap (%d)\n", rv
);
394 ccb
->ccb_ccbpa
= sc
->sc_dmamap
->dm_segs
[0].ds_addr
+
396 SLIST_INSERT_HEAD(&sc
->sc_ccb_free
, ccb
, ccb_chain
);
400 aprint_error_dev(&sc
->sc_dv
, "unable to create CCBs\n");
402 } else if (i
!= sc
->sc_nccbs
) {
403 aprint_error_dev(&sc
->sc_dv
, "%d/%d CCBs created!\n",
408 /* Set shutdownhook before we start any device activity. */
410 dpt_sdh
= shutdownhook_establish(dpt_shutdown
, NULL
);
412 /* Get the inquiry data from the HBA. */
413 dpt_hba_inquire(sc
, &ei
);
416 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
417 * dpt0: interrupting at irq 10
418 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
420 for (i
= 0; ei
->ei_vendor
[i
] != ' ' && i
< __arraycount(ei
->ei_vendor
);
422 vendor
[i
] = ei
->ei_vendor
[i
];
425 for (i
= 0; ei
->ei_model
[i
] != ' ' && i
< __arraycount(ei
->ei_model
);
427 model
[i
] = ei
->ei_model
[i
];
428 for (j
= 0; ei
->ei_suffix
[j
] != ' ' && j
< __arraycount(ei
->ei_suffix
);
430 model
[i
] = ei
->ei_suffix
[j
];
433 /* Find the marketing name for the board. */
434 for (i
= 0; dpt_cname
[i
] != NULL
; i
+= 2)
435 if (memcmp(ei
->ei_model
+ 2, dpt_cname
[i
], 4) == 0)
438 aprint_normal("%s %s (%s)\n", vendor
, dpt_cname
[i
+ 1], model
);
441 aprint_normal_dev(&sc
->sc_dv
, "interrupting at %s\n",
444 maxchannel
= (ec
->ec_feat3
& EC_F3_MAX_CHANNEL_MASK
) >>
445 EC_F3_MAX_CHANNEL_SHIFT
;
446 maxtarget
= (ec
->ec_feat3
& EC_F3_MAX_TARGET_MASK
) >>
447 EC_F3_MAX_TARGET_SHIFT
;
449 aprint_normal_dev(&sc
->sc_dv
, "%d queued commands, %d channel(s), adapter on ID(s)",
450 sc
->sc_nccbs
, maxchannel
+ 1);
452 for (i
= 0; i
<= maxchannel
; i
++) {
453 sc
->sc_hbaid
[i
] = ec
->ec_hba
[3 - i
];
454 aprint_normal(" %d", sc
->sc_hbaid
[i
]);
459 * Reset the SCSI controller chip(s) and bus. XXX Do we need to do
462 if (dpt_cmd(sc
, NULL
, CP_IMMEDIATE
, CPI_BUS_RESET
))
463 panic("%s: dpt_cmd failed", device_xname(&sc
->sc_dv
));
465 /* Fill in the scsipi_adapter. */
466 adapt
= &sc
->sc_adapt
;
467 memset(adapt
, 0, sizeof(*adapt
));
468 adapt
->adapt_dev
= &sc
->sc_dv
;
469 adapt
->adapt_nchannels
= maxchannel
+ 1;
470 adapt
->adapt_openings
= sc
->sc_nccbs
- 1;
471 adapt
->adapt_max_periph
= sc
->sc_nccbs
- 1;
472 adapt
->adapt_request
= dpt_scsipi_request
;
473 adapt
->adapt_minphys
= dpt_minphys
;
475 for (i
= 0; i
<= maxchannel
; i
++) {
476 /* Fill in the scsipi_channel. */
477 chan
= &sc
->sc_chans
[i
];
478 memset(chan
, 0, sizeof(*chan
));
479 chan
->chan_adapter
= adapt
;
480 chan
->chan_bustype
= &scsi_bustype
;
481 chan
->chan_channel
= i
;
482 chan
->chan_ntargets
= maxtarget
+ 1;
483 chan
->chan_nluns
= ec
->ec_maxlun
+ 1;
484 chan
->chan_id
= sc
->sc_hbaid
[i
];
485 config_found(&sc
->sc_dv
, chan
, scsiprint
);
490 * Read the EATA configuration from the HBA and perform some sanity checks.
493 dpt_readcfg(struct dpt_softc
*sc
)
501 /* Older firmware may puke if we talk to it too soon after reset. */
502 dpt_outb(sc
, HA_COMMAND
, CP_RESET
);
505 for (i
= 1000; i
; i
--) {
506 if ((dpt_inb(sc
, HA_STATUS
) & HA_ST_READY
) != 0)
512 printf("%s: HBA not ready after reset (hba status:%02x)\n",
513 device_xname(&sc
->sc_dv
), dpt_inb(sc
, HA_STATUS
));
517 while((((stat
= dpt_inb(sc
, HA_STATUS
))
518 != (HA_ST_READY
|HA_ST_SEEK_COMPLETE
))
519 && (stat
!= (HA_ST_READY
|HA_ST_SEEK_COMPLETE
|HA_ST_ERROR
))
520 && (stat
!= (HA_ST_READY
|HA_ST_SEEK_COMPLETE
|HA_ST_ERROR
|HA_ST_DRQ
)))
521 || (dpt_wait(sc
, HA_ST_BUSY
, 0, 2000))) {
522 /* RAID drives still spinning up? */
523 if(dpt_inb(sc
, HA_ERROR
) != 'D' ||
524 dpt_inb(sc
, HA_ERROR
+ 1) != 'P' ||
525 dpt_inb(sc
, HA_ERROR
+ 2) != 'T') {
526 printf("%s: HBA not ready\n", device_xname(&sc
->sc_dv
));
532 * Issue the read-config command and wait for the data to appear.
534 * Apparently certain firmware revisions won't DMA later on if we
535 * request the config data using PIO, but it makes it a lot easier
536 * as no DMA setup is required.
538 dpt_outb(sc
, HA_COMMAND
, CP_PIO_GETCFG
);
539 memset(ec
, 0, sizeof(*ec
));
540 i
= ((int)&((struct eata_cfg
*)0)->ec_cfglen
+
541 sizeof(ec
->ec_cfglen
)) >> 1;
544 if (dpt_wait(sc
, 0xFF, HA_ST_DATA_RDY
, 2000)) {
545 printf("%s: cfg data didn't appear (hba status:%02x)\n",
546 device_xname(&sc
->sc_dv
), dpt_inb(sc
, HA_STATUS
));
552 *p
++ = bus_space_read_stream_2(sc
->sc_iot
, sc
->sc_ioh
, HA_DATA
);
554 if ((i
= ec
->ec_cfglen
) > (sizeof(struct eata_cfg
)
555 - (int)(&(((struct eata_cfg
*)0L)->ec_cfglen
))
556 - sizeof(ec
->ec_cfglen
)))
557 i
= sizeof(struct eata_cfg
)
558 - (int)(&(((struct eata_cfg
*)0L)->ec_cfglen
))
559 - sizeof(ec
->ec_cfglen
);
561 j
= i
+ (int)(&(((struct eata_cfg
*)0L)->ec_cfglen
)) +
562 sizeof(ec
->ec_cfglen
);
566 *p
++ = bus_space_read_stream_2(sc
->sc_iot
, sc
->sc_ioh
, HA_DATA
);
568 /* Flush until we have read 512 bytes. */
569 i
= (512 - j
+ 1) >> 1;
571 (void)bus_space_read_stream_2(sc
->sc_iot
, sc
->sc_ioh
, HA_DATA
);
573 /* Defaults for older firmware... */
574 if (p
<= (u_short
*)&ec
->ec_hba
[DPT_MAX_CHANNELS
- 1])
575 ec
->ec_hba
[DPT_MAX_CHANNELS
- 1] = 7;
577 if ((dpt_inb(sc
, HA_STATUS
) & HA_ST_ERROR
) != 0) {
578 aprint_error_dev(&sc
->sc_dv
, "HBA error\n");
582 if (memcmp(ec
->ec_eatasig
, "EATA", 4) != 0) {
583 aprint_error_dev(&sc
->sc_dv
, "EATA signature mismatch\n");
587 if ((ec
->ec_feat0
& EC_F0_HBA_VALID
) == 0) {
588 aprint_error_dev(&sc
->sc_dv
, "ec_hba field invalid\n");
592 if ((ec
->ec_feat0
& EC_F0_DMA_SUPPORTED
) == 0) {
593 aprint_error_dev(&sc
->sc_dv
, "DMA not supported\n");
601 * Our `shutdownhook' to cleanly shut down the HBA. The HBA must flush all
602 * data from its cache and mark array groups as clean.
604 * XXX This doesn't always work (i.e., the HBA may still be flushing after
605 * we tell root that it's safe to power off).
608 dpt_shutdown(void *cookie
)
610 extern struct cfdriver dpt_cd
;
611 struct dpt_softc
*sc
;
614 printf("shutting down dpt devices...");
616 for (i
= 0; i
< dpt_cd
.cd_ndevs
; i
++) {
617 if ((sc
= device_lookup_private(&dpt_cd
, i
)) == NULL
)
619 dpt_cmd(sc
, NULL
, CP_IMMEDIATE
, CPI_POWEROFF_WARN
);
627 * Send an EATA command to the HBA.
630 dpt_cmd(struct dpt_softc
*sc
, struct dpt_ccb
*ccb
, int eatacmd
, int icmd
)
637 for (i
= 20000; i
!= 0; i
--) {
638 if ((dpt_inb(sc
, HA_AUX_STATUS
) & HA_AUX_BUSY
) == 0)
647 pa
= (ccb
!= NULL
? ccb
->ccb_ccbpa
: 0);
648 dpt_outb(sc
, HA_DMA_BASE
+ 0, (pa
) & 0xff);
649 dpt_outb(sc
, HA_DMA_BASE
+ 1, (pa
>> 8) & 0xff);
650 dpt_outb(sc
, HA_DMA_BASE
+ 2, (pa
>> 16) & 0xff);
651 dpt_outb(sc
, HA_DMA_BASE
+ 3, (pa
>> 24) & 0xff);
653 if (eatacmd
== CP_IMMEDIATE
)
654 dpt_outb(sc
, HA_ICMD
, icmd
);
656 dpt_outb(sc
, HA_COMMAND
, eatacmd
);
663 * Wait for the HBA status register to reach a specific state.
666 dpt_wait(struct dpt_softc
*sc
, u_int8_t mask
, u_int8_t state
, int ms
)
669 for (ms
*= 10; ms
!= 0; ms
--) {
670 if ((dpt_inb(sc
, HA_STATUS
) & mask
) == state
)
679 * Spin waiting for a command to finish. The timeout value from the CCB is
680 * used. The CCB must be marked with CCB_PRIVATE, otherwise it'll get
681 * recycled before we get a look at it.
684 dpt_ccb_poll(struct dpt_softc
*sc
, struct dpt_ccb
*ccb
)
689 if ((ccb
->ccb_flg
& CCB_PRIVATE
) == 0)
690 panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
695 if ((ccb
->ccb_flg
& CCB_INTR
) != 0) {
700 for (i
= ccb
->ccb_timeout
* 20; i
!= 0; i
--) {
701 if ((dpt_inb(sc
, HA_AUX_STATUS
) & HA_AUX_INTR
) != 0)
703 if ((ccb
->ccb_flg
& CCB_INTR
) != 0)
713 * We have a command which has been processed by the HBA, so now we look to
714 * see how the operation went. CCBs marked CCB_PRIVATE are not passed here
718 dpt_ccb_done(struct dpt_softc
*sc
, struct dpt_ccb
*ccb
)
720 struct scsipi_xfer
*xs
;
724 SC_DEBUG(xs
->xs_periph
, SCSIPI_DB2
, ("dpt_ccb_done\n"));
727 * If we were a data transfer, unload the map that described the
730 if (xs
->datalen
!= 0)
731 dpt_ccb_unmap(sc
, ccb
);
733 if (xs
->error
== XS_NOERROR
) {
734 if (ccb
->ccb_hba_status
!= SP_HBA_NO_ERROR
) {
735 switch (ccb
->ccb_hba_status
) {
736 case SP_HBA_ERROR_SEL_TO
:
737 xs
->error
= XS_SELTIMEOUT
;
739 case SP_HBA_ERROR_RESET
:
740 xs
->error
= XS_RESET
;
743 printf("%s: HBA status %x\n",
744 device_xname(&sc
->sc_dv
), ccb
->ccb_hba_status
);
745 xs
->error
= XS_DRIVER_STUFFUP
;
748 } else if (ccb
->ccb_scsi_status
!= SCSI_OK
) {
749 switch (ccb
->ccb_scsi_status
) {
751 memcpy(&xs
->sense
.scsi_sense
, &ccb
->ccb_sense
,
752 sizeof(xs
->sense
.scsi_sense
));
753 xs
->error
= XS_SENSE
;
756 case SCSI_QUEUE_FULL
:
760 scsipi_printaddr(xs
->xs_periph
);
761 printf("SCSI status %x\n",
762 ccb
->ccb_scsi_status
);
763 xs
->error
= XS_DRIVER_STUFFUP
;
769 xs
->status
= ccb
->ccb_scsi_status
;
772 /* Free up the CCB and mark the command as done. */
773 dpt_ccb_free(sc
, ccb
);
778 * Specified CCB has timed out, abort it.
781 dpt_ccb_abort(struct dpt_softc
*sc
, struct dpt_ccb
*ccb
)
783 struct scsipi_periph
*periph
;
784 struct scsipi_xfer
*xs
;
788 periph
= xs
->xs_periph
;
790 scsipi_printaddr(periph
);
791 printf("timed out (status:%02x aux status:%02x)",
792 dpt_inb(sc
, HA_STATUS
), dpt_inb(sc
, HA_AUX_STATUS
));
796 if ((ccb
->ccb_flg
& CCB_ABORT
) != 0) {
797 /* Abort timed out, reset the HBA */
798 printf(" AGAIN, resetting HBA\n");
799 dpt_outb(sc
, HA_COMMAND
, CP_RESET
);
802 /* Abort the operation that has timed out */
804 xs
->error
= XS_TIMEOUT
;
805 ccb
->ccb_timeout
= DPT_ABORT_TIMEOUT
;
806 ccb
->ccb_flg
|= CCB_ABORT
;
807 /* Start the abort */
808 if (dpt_cmd(sc
, ccb
, CP_IMMEDIATE
, CPI_SPEC_ABORT
))
809 aprint_error_dev(&sc
->sc_dv
, "dpt_cmd failed\n");
816 * Map a data transfer.
819 dpt_ccb_map(struct dpt_softc
*sc
, struct dpt_ccb
*ccb
)
821 struct scsipi_xfer
*xs
;
823 bus_dma_segment_t
*ds
;
829 xfer
= ccb
->ccb_dmamap_xfer
;
830 cp
= &ccb
->ccb_eata_cp
;
832 rv
= bus_dmamap_load(sc
->sc_dmat
, xfer
, xs
->data
, xs
->datalen
, NULL
,
833 ((xs
->xs_control
& XS_CTL_NOSLEEP
) != 0 ?
834 BUS_DMA_NOWAIT
: BUS_DMA_WAITOK
) | BUS_DMA_STREAMING
|
835 ((xs
->xs_control
& XS_CTL_DATA_IN
) ? BUS_DMA_READ
: BUS_DMA_WRITE
));
842 xs
->error
= XS_RESOURCE_SHORTAGE
;
845 xs
->error
= XS_DRIVER_STUFFUP
;
846 printf("%s: error %d loading map\n", device_xname(&sc
->sc_dv
), rv
);
850 if (xs
->error
!= XS_NOERROR
) {
851 dpt_ccb_free(sc
, ccb
);
856 bus_dmamap_sync(sc
->sc_dmat
, xfer
, 0, xfer
->dm_mapsize
,
857 (xs
->xs_control
& XS_CTL_DATA_IN
) != 0 ? BUS_DMASYNC_PREREAD
:
858 BUS_DMASYNC_PREWRITE
);
860 /* Don't bother using scatter/gather for just 1 seg */
861 if (xfer
->dm_nsegs
== 1) {
862 cp
->cp_dataaddr
= htobe32(xfer
->dm_segs
[0].ds_addr
);
863 cp
->cp_datalen
= htobe32(xfer
->dm_segs
[0].ds_len
);
866 * Load the hardware scatter/gather map with
867 * the contents of the DMA map.
871 for (i
= 0; i
< xfer
->dm_nsegs
; i
++, sg
++, ds
++) {
872 sg
->sg_addr
= htobe32(ds
->ds_addr
);
873 sg
->sg_len
= htobe32(ds
->ds_len
);
875 cp
->cp_dataaddr
= htobe32(CCB_OFF(sc
, ccb
) +
876 sc
->sc_dmamap
->dm_segs
[0].ds_addr
+
877 offsetof(struct dpt_ccb
, ccb_sg
));
878 cp
->cp_datalen
= htobe32(i
* sizeof(struct eata_sg
));
879 cp
->cp_ctl0
|= CP_C0_SCATTER
;
889 dpt_ccb_unmap(struct dpt_softc
*sc
, struct dpt_ccb
*ccb
)
892 bus_dmamap_sync(sc
->sc_dmat
, ccb
->ccb_dmamap_xfer
, 0,
893 ccb
->ccb_dmamap_xfer
->dm_mapsize
,
894 (ccb
->ccb_eata_cp
.cp_ctl0
& CP_C0_DATA_IN
) != 0 ?
895 BUS_DMASYNC_POSTREAD
: BUS_DMASYNC_POSTWRITE
);
896 bus_dmamap_unload(sc
->sc_dmat
, ccb
->ccb_dmamap_xfer
);
900 * Adjust the size of each I/O before it passes to the SCSI layer.
903 dpt_minphys(struct buf
*bp
)
906 if (bp
->b_bcount
> DPT_MAX_XFER
)
907 bp
->b_bcount
= DPT_MAX_XFER
;
912 * Start a SCSI command.
915 dpt_scsipi_request(struct scsipi_channel
*chan
, scsipi_adapter_req_t req
,
918 struct dpt_softc
*sc
;
919 struct scsipi_xfer
*xs
;
921 struct scsipi_periph
*periph
;
925 sc
= (struct dpt_softc
*)chan
->chan_adapter
->adapt_dev
;
928 case ADAPTER_REQ_RUN_XFER
:
930 periph
= xs
->xs_periph
;
931 flags
= xs
->xs_control
;
934 /* Cmds must be no more than 12 bytes for us. */
935 if (xs
->cmdlen
> 12) {
936 xs
->error
= XS_DRIVER_STUFFUP
;
942 * XXX We can't reset devices just yet. Apparently some
943 * older firmware revisions don't even support it.
945 if ((flags
& XS_CTL_RESET
) != 0) {
946 xs
->error
= XS_DRIVER_STUFFUP
;
952 * Get a CCB and fill it.
954 ccb
= dpt_ccb_alloc(sc
);
956 ccb
->ccb_timeout
= xs
->timeout
;
958 cp
= &ccb
->ccb_eata_cp
;
959 memcpy(&cp
->cp_cdb_cmd
, xs
->cmd
, xs
->cmdlen
);
960 cp
->cp_ccbid
= ccb
->ccb_id
;
961 cp
->cp_senselen
= sizeof(ccb
->ccb_sense
);
962 cp
->cp_stataddr
= htobe32(sc
->sc_stppa
);
963 cp
->cp_ctl0
= CP_C0_AUTO_SENSE
;
966 cp
->cp_ctl3
= periph
->periph_target
<< CP_C3_ID_SHIFT
;
967 cp
->cp_ctl3
|= chan
->chan_channel
<< CP_C3_CHANNEL_SHIFT
;
968 cp
->cp_ctl4
= periph
->periph_lun
<< CP_C4_LUN_SHIFT
;
969 cp
->cp_ctl4
|= CP_C4_DIS_PRI
| CP_C4_IDENTIFY
;
971 if ((flags
& XS_CTL_DATA_IN
) != 0)
972 cp
->cp_ctl0
|= CP_C0_DATA_IN
;
973 if ((flags
& XS_CTL_DATA_OUT
) != 0)
974 cp
->cp_ctl0
|= CP_C0_DATA_OUT
;
975 if (sc
->sc_hbaid
[chan
->chan_channel
] == periph
->periph_target
)
976 cp
->cp_ctl0
|= CP_C0_INTERPRET
;
978 /* Synchronous xfers mustn't write-back through the cache. */
980 if ((xs
->bp
->b_flags
& (B_ASYNC
| B_READ
)) == 0)
981 cp
->cp_ctl2
|= CP_C2_NO_CACHE
;
984 htobe32(sc
->sc_dmamap
->dm_segs
[0].ds_addr
+
985 CCB_OFF(sc
, ccb
) + offsetof(struct dpt_ccb
, ccb_sense
));
987 if (xs
->datalen
!= 0) {
988 if (dpt_ccb_map(sc
, ccb
))
995 /* Sync up CCB and status packet. */
996 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
,
997 CCB_OFF(sc
, ccb
), sizeof(struct dpt_ccb
),
998 BUS_DMASYNC_PREWRITE
);
999 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, sc
->sc_stpoff
,
1000 sizeof(struct eata_sp
), BUS_DMASYNC_PREREAD
);
1003 * Start the command.
1005 if ((xs
->xs_control
& XS_CTL_POLL
) != 0)
1006 ccb
->ccb_flg
|= CCB_PRIVATE
;
1008 if (dpt_cmd(sc
, ccb
, CP_DMA_CMD
, 0)) {
1009 aprint_error_dev(&sc
->sc_dv
, "dpt_cmd failed\n");
1010 xs
->error
= XS_DRIVER_STUFFUP
;
1011 if (xs
->datalen
!= 0)
1012 dpt_ccb_unmap(sc
, ccb
);
1013 dpt_ccb_free(sc
, ccb
);
1017 if ((xs
->xs_control
& XS_CTL_POLL
) == 0)
1020 if (dpt_ccb_poll(sc
, ccb
)) {
1021 dpt_ccb_abort(sc
, ccb
);
1022 /* Wait for abort to complete... */
1023 if (dpt_ccb_poll(sc
, ccb
))
1024 dpt_ccb_abort(sc
, ccb
);
1027 dpt_ccb_done(sc
, ccb
);
1030 case ADAPTER_REQ_GROW_RESOURCES
:
1032 * Not supported, since we allocate the maximum number of
1037 case ADAPTER_REQ_SET_XFER_MODE
:
1039 * This will be handled by the HBA itself, and we can't
1040 * modify that (ditto for tagged queueing).
1047 * Get inquiry data from the adapter.
1050 dpt_hba_inquire(struct dpt_softc
*sc
, struct eata_inquiry_data
**ei
)
1052 struct dpt_ccb
*ccb
;
1055 *ei
= (struct eata_inquiry_data
*)sc
->sc_scr
;
1057 /* Get a CCB and mark as private */
1058 ccb
= dpt_ccb_alloc(sc
);
1059 ccb
->ccb_flg
|= CCB_PRIVATE
;
1060 ccb
->ccb_timeout
= 200;
1062 /* Put all the arguments into the CCB. */
1063 cp
= &ccb
->ccb_eata_cp
;
1064 cp
->cp_ccbid
= ccb
->ccb_id
;
1065 cp
->cp_senselen
= sizeof(ccb
->ccb_sense
);
1066 cp
->cp_senseaddr
= 0;
1067 cp
->cp_stataddr
= htobe32(sc
->sc_stppa
);
1068 cp
->cp_dataaddr
= htobe32(sc
->sc_scrpa
);
1069 cp
->cp_datalen
= htobe32(sizeof(struct eata_inquiry_data
));
1070 cp
->cp_ctl0
= CP_C0_DATA_IN
| CP_C0_INTERPRET
;
1073 cp
->cp_ctl3
= sc
->sc_hbaid
[0] << CP_C3_ID_SHIFT
;
1074 cp
->cp_ctl4
= CP_C4_DIS_PRI
| CP_C4_IDENTIFY
;
1076 /* Put together the SCSI inquiry command. */
1077 memset(&cp
->cp_cdb_cmd
, 0, 12);
1078 cp
->cp_cdb_cmd
= INQUIRY
;
1079 cp
->cp_cdb_len
= sizeof(struct eata_inquiry_data
);
1081 /* Sync up CCB, status packet and scratch area. */
1082 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, CCB_OFF(sc
, ccb
),
1083 sizeof(struct dpt_ccb
), BUS_DMASYNC_PREWRITE
);
1084 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, sc
->sc_stpoff
,
1085 sizeof(struct eata_sp
), BUS_DMASYNC_PREREAD
);
1086 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, sc
->sc_scroff
,
1087 sizeof(struct eata_inquiry_data
), BUS_DMASYNC_PREREAD
);
1089 /* Start the command and poll on completion. */
1090 if (dpt_cmd(sc
, ccb
, CP_DMA_CMD
, 0))
1091 panic("%s: dpt_cmd failed", device_xname(&sc
->sc_dv
));
1093 if (dpt_ccb_poll(sc
, ccb
))
1094 panic("%s: inquiry timed out", device_xname(&sc
->sc_dv
));
1096 if (ccb
->ccb_hba_status
!= SP_HBA_NO_ERROR
||
1097 ccb
->ccb_scsi_status
!= SCSI_OK
)
1098 panic("%s: inquiry failed (hba:%02x scsi:%02x)",
1099 device_xname(&sc
->sc_dv
), ccb
->ccb_hba_status
,
1100 ccb
->ccb_scsi_status
);
1102 /* Sync up the DMA map and free CCB, returning. */
1103 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, sc
->sc_scroff
,
1104 sizeof(struct eata_inquiry_data
), BUS_DMASYNC_POSTREAD
);
1105 dpt_ccb_free(sc
, ccb
);
1109 dptopen(dev_t dev
, int flag
, int mode
, struct lwp
*l
)
1112 if (device_lookup(&dpt_cd
, minor(dev
)) == NULL
)
1119 dptioctl(dev_t dev
, u_long cmd
, void *data
, int flag
, struct lwp
*l
)
1121 struct dpt_softc
*sc
;
1124 sc
= device_lookup_private(&dpt_cd
, minor(dev
));
1126 switch (cmd
& 0xffff) {
1128 memcpy(data
, &dpt_sig
, min(IOCPARM_LEN(cmd
), sizeof(dpt_sig
)));
1132 dpt_ctlrinfo(sc
, (struct dpt_eata_ctlrinfo
*)data
);
1136 dpt_sysinfo(sc
, (struct dpt_sysinfo
*)data
);
1141 * XXX Don't know how to get this from EATA boards. I think
1142 * it involves waiting for a "DPT" sequence from HA_ERROR
1143 * and then reading one of the HA_ICMD registers.
1148 case DPT_EATAUSRCMD
:
1149 rv
= kauth_authorize_device_passthru(l
->l_cred
, dev
,
1150 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL
, data
);
1154 if (IOCPARM_LEN(cmd
) < sizeof(struct eata_ucp
)) {
1155 DPRINTF(("%s: ucp %lu vs %lu bytes\n",
1156 device_xname(&sc
->sc_dv
), IOCPARM_LEN(cmd
),
1157 (unsigned long int)sizeof(struct eata_ucp
)));
1161 if (sc
->sc_uactive
++)
1162 tsleep(&sc
->sc_uactive
, PRIBIO
, "dptslp", 0);
1164 rv
= dpt_passthrough(sc
, (struct eata_ucp
*)data
, l
);
1167 wakeup_one(&sc
->sc_uactive
);
1171 DPRINTF(("%s: unknown ioctl %lx\n", device_xname(&sc
->sc_dv
), cmd
));
1179 dpt_ctlrinfo(struct dpt_softc
*sc
, struct dpt_eata_ctlrinfo
*info
)
1182 memset(info
, 0, sizeof(*info
));
1183 info
->id
= sc
->sc_hbaid
[0];
1184 info
->vect
= sc
->sc_isairq
;
1185 info
->base
= sc
->sc_isaport
;
1186 info
->qdepth
= sc
->sc_nccbs
;
1187 info
->sgsize
= DPT_SG_SIZE
* sizeof(struct eata_sg
);
1190 info
->do_drive32
= 1;
1192 info
->cpLength
= sizeof(struct eata_cp
);
1193 info
->spLength
= sizeof(struct eata_sp
);
1194 info
->drqNum
= sc
->sc_isadrq
;
1198 dpt_sysinfo(struct dpt_softc
*sc
, struct dpt_sysinfo
*info
)
1204 memset(info
, 0, sizeof(*info
));
1214 info
->drive0CMOS
= j
;
1221 info
->drive1CMOS
= j
;
1222 info
->processorFamily
= dpt_sig
.dsProcessorFamily
;
1225 * Get the conventional memory size from CMOS.
1232 info
->conventionalMemSize
= j
;
1235 * Get the extended memory size from CMOS.
1242 info
->extendedMemSize
= j
;
1244 switch (cpu_class
) {
1246 info
->processorType
= PROC_386
;
1249 info
->processorType
= PROC_486
;
1252 info
->processorType
= PROC_PENTIUM
;
1256 info
->processorType
= PROC_SEXIUM
;
1260 info
->flags
= SI_CMOS_Valid
| SI_BusTypeValid
|
1261 SI_MemorySizeValid
| SI_NO_SmartROM
;
1263 info
->flags
= SI_BusTypeValid
| SI_NO_SmartROM
;
1266 info
->busType
= sc
->sc_bustype
;
1270 dpt_passthrough(struct dpt_softc
*sc
, struct eata_ucp
*ucp
, struct lwp
*l
)
1272 struct dpt_ccb
*ccb
;
1276 bus_dmamap_t xfer
= 0; /* XXX: gcc */
1277 bus_dma_segment_t
*ds
;
1278 int datain
= 0, s
, rv
= 0, i
, uslen
; /* XXX: gcc */
1281 * Get a CCB and fill.
1283 ccb
= dpt_ccb_alloc(sc
);
1284 ccb
->ccb_flg
|= CCB_PRIVATE
| CCB_WAIT
;
1285 ccb
->ccb_timeout
= 0;
1286 ccb
->ccb_savesp
= &sp
;
1288 cp
= &ccb
->ccb_eata_cp
;
1289 memcpy(cp
, ucp
->ucp_cp
, sizeof(ucp
->ucp_cp
));
1290 uslen
= cp
->cp_senselen
;
1291 cp
->cp_ccbid
= ccb
->ccb_id
;
1292 cp
->cp_senselen
= sizeof(ccb
->ccb_sense
);
1293 cp
->cp_senseaddr
= htobe32(sc
->sc_dmamap
->dm_segs
[0].ds_addr
+
1294 CCB_OFF(sc
, ccb
) + offsetof(struct dpt_ccb
, ccb_sense
));
1295 cp
->cp_stataddr
= htobe32(sc
->sc_stppa
);
1298 * Map data transfers.
1300 if (ucp
->ucp_dataaddr
&& ucp
->ucp_datalen
) {
1301 xfer
= ccb
->ccb_dmamap_xfer
;
1302 datain
= ((cp
->cp_ctl0
& CP_C0_DATA_IN
) != 0);
1304 if (ucp
->ucp_datalen
> DPT_MAX_XFER
) {
1305 DPRINTF(("%s: xfer too big\n", device_xname(&sc
->sc_dv
)));
1306 dpt_ccb_free(sc
, ccb
);
1309 rv
= bus_dmamap_load(sc
->sc_dmat
, xfer
,
1310 ucp
->ucp_dataaddr
, ucp
->ucp_datalen
, l
->l_proc
,
1311 BUS_DMA_WAITOK
| BUS_DMA_STREAMING
|
1312 (datain
? BUS_DMA_READ
: BUS_DMA_WRITE
));
1314 DPRINTF(("%s: map failed; %d\n", device_xname(&sc
->sc_dv
),
1316 dpt_ccb_free(sc
, ccb
);
1320 bus_dmamap_sync(sc
->sc_dmat
, xfer
, 0, xfer
->dm_mapsize
,
1321 (datain
? BUS_DMASYNC_PREREAD
: BUS_DMASYNC_PREWRITE
));
1325 for (i
= 0; i
< xfer
->dm_nsegs
; i
++, sg
++, ds
++) {
1326 sg
->sg_addr
= htobe32(ds
->ds_addr
);
1327 sg
->sg_len
= htobe32(ds
->ds_len
);
1329 cp
->cp_dataaddr
= htobe32(CCB_OFF(sc
, ccb
) +
1330 sc
->sc_dmamap
->dm_segs
[0].ds_addr
+
1331 offsetof(struct dpt_ccb
, ccb_sg
));
1332 cp
->cp_datalen
= htobe32(i
* sizeof(struct eata_sg
));
1333 cp
->cp_ctl0
|= CP_C0_SCATTER
;
1335 cp
->cp_dataaddr
= 0;
1340 * Start the command and sleep on completion.
1342 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, CCB_OFF(sc
, ccb
),
1343 sizeof(struct dpt_ccb
), BUS_DMASYNC_PREWRITE
);
1345 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, sc
->sc_stpoff
,
1346 sizeof(struct eata_sp
), BUS_DMASYNC_PREREAD
);
1347 if (dpt_cmd(sc
, ccb
, CP_DMA_CMD
, 0))
1348 panic("%s: dpt_cmd failed", device_xname(&sc
->sc_dv
));
1349 tsleep(ccb
, PWAIT
, "dptucmd", 0);
1353 * Sync up the DMA map and copy out results.
1355 bus_dmamap_sync(sc
->sc_dmat
, sc
->sc_dmamap
, CCB_OFF(sc
, ccb
),
1356 sizeof(struct dpt_ccb
), BUS_DMASYNC_POSTWRITE
);
1358 if (cp
->cp_datalen
!= 0) {
1359 bus_dmamap_sync(sc
->sc_dmat
, xfer
, 0, xfer
->dm_mapsize
,
1360 (datain
? BUS_DMASYNC_POSTREAD
: BUS_DMASYNC_POSTWRITE
));
1361 bus_dmamap_unload(sc
->sc_dmat
, xfer
);
1364 if (ucp
->ucp_stataddr
!= NULL
) {
1365 rv
= copyout(&sp
, ucp
->ucp_stataddr
, sizeof(sp
));
1367 DPRINTF(("%s: sp copyout() failed\n",
1368 device_xname(&sc
->sc_dv
)));
1371 if (rv
== 0 && ucp
->ucp_senseaddr
!= NULL
) {
1372 i
= min(uslen
, sizeof(ccb
->ccb_sense
));
1373 rv
= copyout(&ccb
->ccb_sense
, ucp
->ucp_senseaddr
, i
);
1375 DPRINTF(("%s: sense copyout() failed\n",
1376 device_xname(&sc
->sc_dv
)));
1380 ucp
->ucp_hstatus
= (u_int8_t
)ccb
->ccb_hba_status
;
1381 ucp
->ucp_tstatus
= (u_int8_t
)ccb
->ccb_scsi_status
;
1382 dpt_ccb_free(sc
, ccb
);