/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */
/*
 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
 * devices (large disks).
 */
#ifdef DEBUG
#define	EMUL64DEBUG
#endif

#include <sys/scsi/scsi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/types.h>
#include <sys/cpuvar.h>
#include <sys/dklabel.h>

#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>
int emul64_usetaskq = 1;	/* set to zero for debugging */

#ifdef	EMUL64DEBUG
static int emul64_cdb_debug = 0;
#include <sys/debug.h>
#endif
/*
 * cb_ops function prototypes
 */
static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
	cred_t *credp, int *rvalp);
/*
 * dev_ops function prototypes
 */
static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
/*
 * SCSA functions exported by means of the transport table
 */
static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd);
static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_pkt_comp(void *);
static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset(struct scsi_address *ap, int level);
static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
	int whom);
static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
	struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
	int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
	struct scsi_pkt *pkt);
static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
	void (*callback)(caddr_t), caddr_t arg);
/*
 * internal functions
 */
static void emul64_i_initcap(struct emul64 *emul64);

static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
static int emul64_get_tgtrange(struct emul64 *, intptr_t,
	emul64_tgt_t **,
	emul64_tgt_range_t *);
static int emul64_write_off(struct emul64 *, emul64_tgt_t *,
	emul64_tgt_range_t *);
static int emul64_write_on(struct emul64 *, emul64_tgt_t *,
	emul64_tgt_range_t *);
static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
static void emul64_nowrite_free(emul64_nowrite_t *);
static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
	diskaddr_t start_block, size_t blkcnt,
	emul64_rng_overlap_t *overlapp, emul64_nowrite_t ***prevp);

extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);

#ifdef EMUL64DEBUG
static void emul64_debug_dump_cdb(struct scsi_address *ap,
	struct scsi_pkt *pkt);
#endif
static int ddi_in_panic(void);
static int ddi_in_panic() { return (0); }

#ifndef SCSI_CAP_RESET_NOTIFICATION
#define	SCSI_CAP_RESET_NOTIFICATION	14
#endif

#ifndef SCSI_RESET_NOTIFY
#define	SCSI_RESET_NOTIFY	0x01
#endif

#ifndef SCSI_RESET_CANCEL
#define	SCSI_RESET_CANCEL	0x02
#endif
/*
 * emul64_max_task
 *	The taskq facility is used to queue up SCSI start requests on a per
 *	controller basis.  If the maximum number of queued tasks is hit,
 *	taskq_ent_alloc() delays for a second, which adversely impacts our
 *	performance.  This value establishes the maximum number of task
 *	queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *	Specifies the number of threads that should be used to process a
 *	controller's task queue.  Our init function sets this to the number
 *	of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;
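
/*
 * Tuning sketch: both of the above are plain module globals, so besides
 * emul64.conf they could in principle also be pinned at boot through
 * /etc/system.  The lines below are illustrative values only, not
 * recommendations:
 *
 *	set emul64:emul64_max_task = 32
 *	set emul64:emul64_task_nthreads = 4
 */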
static void *emul64_state = NULL;
/*
 * Character/block operations.
 */
static struct cb_ops emul64_cbops = {
	scsi_hba_open,		/* cb_open */
	scsi_hba_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	emul64_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
/*
 * autoconfiguration routines.
 */
static struct dev_ops emul64_ops = {
	DEVO_REV,			/* rev */
	0,				/* refcnt */
	emul64_info,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	emul64_attach,			/* attach */
	emul64_detach,			/* detach */
	nodev,				/* reset */
	&emul64_cbops,			/* char/block ops */
	NULL,				/* bus ops */
	NULL,				/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};
static struct modldrv modldrv = {
	&mod_driverops,			/* module type - driver */
	"emul64 SCSI Host Bus Adapter",	/* module name */
	&emul64_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev - must be MODREV_1 */
	&modldrv,			/* ml_linkage */
	NULL				/* end of driver linkage */
};
int
_init(void)
{
	int	ret;

	ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
	    EMUL64_INITIAL_SOFT_SPACE);
	if (ret != 0)
		return (ret);

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&emul64_state);
		return (ret);
	}

	/* Set the number of task threads to the number of CPUs */
	if (boot_max_ncpus == -1) {
		emul64_task_nthreads = max_ncpus;
	} else {
		emul64_task_nthreads = boot_max_ncpus;
	}

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&emul64_state);
	}

	return (ret);
}

int
_fini(void)
{
	int	ret;

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&emul64_state);

	return (ret);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*
 * Given the device number return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct emul64	*foo;
	int		instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		foo = ddi_get_soft_state(emul64_state, instance);
		if (foo != NULL)
			*result = (void *)foo->emul64_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran = tran;
	emul64->emul64_dip = dip;

	tran->tran_hba_private	= emul64;
	tran->tran_tgt_private	= NULL;
	tran->tran_tgt_init	= emul64_tran_tgt_init;
	tran->tran_tgt_probe	= scsi_hba_probe;
	tran->tran_tgt_free	= NULL;

	tran->tran_start	= emul64_scsi_start;
	tran->tran_abort	= emul64_scsi_abort;
	tran->tran_reset	= emul64_scsi_reset;
	tran->tran_getcap	= emul64_scsi_getcap;
	tran->tran_setcap	= emul64_scsi_setcap;
	tran->tran_init_pkt	= emul64_scsi_init_pkt;
	tran->tran_destroy_pkt	= emul64_scsi_destroy_pkt;
	tran->tran_dmafree	= emul64_scsi_dmafree;
	tran->tran_sync_pkt	= emul64_scsi_sync_pkt;
	tran->tran_reset_notify	= emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);

	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);

	ddi_report_dev(dip);
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64	*emul64;
	scsi_hba_tran_t	*tran;
	int		instance = ddi_get_instance(dip);

	/* get transport structure pointer from the dip */
	if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (!emul64) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);

		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));

		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct emul64	*emul64;
	emul64_tgt_t	*tgt;
	char		**geo_vidpid = NULL;
	char		*geo, *vidpid;
	uint32_t	*geoip = NULL;
	uint_t		length, length2;
	lldaddr_t	sector_count;
	char		prop_name[15];
	int		ret = DDI_FAILURE;

	emul64 = TRAN2EMUL64(tran);
	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * We get called for each target driver.conf node, multiple
	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc).
	 * Check to see if transport to tgt,lun already established.
	 */
	tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
	if (tgt) {
		ret = DDI_SUCCESS;
		goto out;
	}

	/* see if we have driver.conf specified device for this target,lun */
	(void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
	    sd->sd_address.a_target, sd->sd_address.a_lun);
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
	    DDI_PROP_DONTPASS, prop_name,
	    &geo_vidpid, &length) != DDI_PROP_SUCCESS)
		goto out;
	if (length < 2) {
		cmn_err(CE_WARN, "emul64: %s property does not have 2 "
		    "elements", prop_name);
		goto out;
	}

	/* pick geometry name and vidpid string from string array */
	geo = *geo_vidpid;
	vidpid = *(geo_vidpid + 1);

	/* lookup geometry property integer array */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
	    geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
		goto out;
	}
	if (length2 < 6) {
		cmn_err(CE_WARN, "emul64: property %s does not have 6 "
		    "elements", *geo_vidpid);
		goto out;
	}

	/* allocate and initialize tgt structure for tgt,lun */
	tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
	rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

	/* create avl for data block storage */
	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
	    sizeof (blklist_t), offsetof(blklist_t, bl_node));

	/* save scsi_address and vidpid */
	bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
	(void) strncpy(tgt->emul64_tgt_inq, vidpid,
	    sizeof (emul64->emul64_tgt->emul64_tgt_inq));

	/*
	 * The high order 4 bytes of the sector count always come first in
	 * emul64.conf.  They are followed by the low order 4 bytes.  Not
	 * all CPU types want them in this order, but laddr_t takes care of
	 * this for us.  We then pick up geometry (ncyl X nheads X nsect).
	 */
	sector_count._p._u = *(geoip + 0);
	sector_count._p._l = *(geoip + 1);
	/*
	 * On 32-bit platforms, fix block size if it's greater than the
	 * allowable maximum.
	 */
#if !defined(_LP64)
	if (sector_count._f > DK_MAX_BLOCKS)
		sector_count._f = DK_MAX_BLOCKS;
#endif
	tgt->emul64_tgt_sectors = sector_count._f;
	tgt->emul64_tgt_dtype	= *(geoip + 2);
	tgt->emul64_tgt_ncyls	= *(geoip + 3);
	tgt->emul64_tgt_nheads	= *(geoip + 4);
	tgt->emul64_tgt_nsect	= *(geoip + 5);

	/* insert target structure into list */
	tgt->emul64_tgt_next = emul64->emul64_tgt;
	emul64->emul64_tgt = tgt;
	ret = DDI_SUCCESS;

out:	EMUL64_MUTEX_EXIT(emul64);
	if (geoip != NULL)
		ddi_prop_free(geoip);
	if (geo_vidpid != NULL)
		ddi_prop_free(geo_vidpid);
	return (ret);
}
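
/*
 * Sketch of the driver.conf plumbing consumed above (names and numbers are
 * hypothetical, not from a shipped emul64.conf):
 *
 *	targ_0_0 = "geo_0_0", "VENDOR  PRODUCT";
 *	geo_0_0 = 0, 2097152, 0, 4096, 16, 32;
 *
 * targ_<target>_<lun> names a geometry property plus an inquiry vid/pid
 * string; the named geometry array supplies, in order, the sector count
 * high 32 bits and low 32 bits, the device type, ncyl, nheads, and nsect,
 * matching the six geoip words read above.
 */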
/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description	 : Initializes the default target capabilities and
 *		   sync rates.
 *
 * Context	 : Called from the user thread through attach.
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
	uint16_t	cap, synch;
	int		i;

	cap = 0;
	synch = 0;
	for (i = 0; i < NTARGETS_WIDE; i++) {
		emul64->emul64_cap[i] = cap;
		emul64->emul64_synch[i] = synch;
	}
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}
/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *		   -1 if capability is not defined
 * Description	 : returns current capability value
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
		rval = 1 << 24; /* Limit to 16MB max transfer */
		break;
	case SCSI_CAP_MSG_OUT:
		rval = 1;
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_UNTAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_PARITY:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = emul64->emul64_initiator_id;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_LINKED_CMDS:
		break;
	case SCSI_CAP_RESET_NOTIFICATION:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}
/*
 * Function name : emul64_scsi_setcap()
 *
 * Return Values : 1 - capability exists and can be set to new value
 *		   0 - capability could not be set to new value
 *		  -1 - no such capability
 *
 * Description	 : sets a capability for a target
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	struct emul64	*emul64	= ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = -1;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_TOTAL_SECTORS:
		emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
		rval = 1;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = 1;
		break;
	default:
		rval = -1;
		break;
	}

	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}
/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_init_pkt(9F).
 *		   Refer to tran_init_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen,
		    tgtlen, sizeof (struct emul64_cmd), callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt		= pkt;
		sp->cmd_flags		= 0;
		sp->cmd_scblen		= statuslen;
		sp->cmd_cdblen		= cmdlen;
		sp->cmd_emul64		= emul64;
		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;
	} else {
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *) bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}
/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_destroy_pkt(9F).
 *		   Refer to tran_destroy_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static void
emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	/*
	 * emul64_scsi_dmafree inline to make things faster
	 */
	if (sp->cmd_flags & CFLAG_DMAVALID) {
		/*
		 * Free the mapping.
		 */
		sp->cmd_flags &= ~CFLAG_DMAVALID;
	}

	/*
	 * Free the pkt
	 */
	scsi_hba_pkt_free(ap, pkt);
}
/*
 * Function name : emul64_scsi_dmafree()
 *
 * Return Values : none
 * Description	 : free dvma resources
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}

/*
 * Function name : emul64_scsi_sync_pkt()
 *
 * Return Values : none
 * Description	 : sync dma
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/*ARGSUSED*/
static void
emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
}
/*
 * routine for reset notification setup, to register or cancel.
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
	void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64				*emul64 = ADDR2EMUL64(ap);
	struct emul64_reset_notify_entry	*p, *beforep;
	int					rval = DDI_FAILURE;

	mutex_enter(EMUL64_REQ_MUTEX(emul64));

	p = emul64->emul64_reset_notify_listf;
	beforep = NULL;

	while (p) {
		if (p->ap == ap)
			break;	/* An entry exists for this target */
		beforep = p;
		p = p->next;
	}

	if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
		if (beforep == NULL) {
			emul64->emul64_reset_notify_listf = p->next;
		} else {
			beforep->next = p->next;
		}
		kmem_free((caddr_t)p,
		    sizeof (struct emul64_reset_notify_entry));
		rval = DDI_SUCCESS;
	} else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
		p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
		    KM_SLEEP);
		p->ap = ap;
		p->callback = callback;
		p->arg = arg;
		p->next = emul64->emul64_reset_notify_listf;
		emul64->emul64_reset_notify_listf = p;
		rval = DDI_SUCCESS;
	}

	mutex_exit(EMUL64_REQ_MUTEX(emul64));

	return (rval);
}
/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp	= PKT2CMD(pkt);
	int			rval	= TRAN_ACCEPT;
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * calculate deadline from pkt_time
	 * Instead of multiplying by 100 (ie. HZ), we multiply by 128 so
	 * we can shift and at the same time have a 28% grace period
	 * we ignore the rare case of pkt_time == 0 and deal with it
	 * in emul64_i_watch()
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);
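
	/*
	 * Worked example of the deadline math: with HZ assumed to be 100,
	 * a pkt_time of 10 seconds is nominally 10 * 100 = 1000 ticks;
	 * 10 * 128 = 1280 ticks yields the same timeout plus the ~28%
	 * grace period noted above, and multiplying by 128 can be done
	 * with a shift (pkt_time << 7).
	 */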
	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = (uintptr_t)NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
			if (dispatched == (uintptr_t)NULL) {
				/* Queue was full.  dispatch failed. */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == (uintptr_t)NULL) {
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}
void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}
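
/*
 * Example: emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0), as the
 * command dispatcher below issues for unsupported CDBs, reports CHECK
 * CONDITION with sense key ILLEGAL REQUEST and ASC/ASCQ 0x24/0x00
 * ("invalid field in CDB"); the sense bytes travel back in the packet's
 * auto-request-sense area, so no separate REQUEST SENSE round trip is
 * needed.
 */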
int
emul64_error_inject(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_arq_status	*arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;
	uint_t			max_sense_len;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	/*
	 * If there is no target, skip the error injection and
	 * let the packet be handled normally.  This would normally
	 * never happen since a_target and a_lun are setup in
	 * emul64_scsi_init_pkt.
	 */
	if (tgt == NULL) {
		return (ERR_INJ_DISABLE);
	}

	if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
		arq->sts_status = tgt->emul64_einj_scsi_status;
		pkt->pkt_state = tgt->emul64_einj_pkt_state;
		pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

		/*
		 * Calculate available sense buffer length.  We could just
		 * assume sizeof(struct scsi_extended_sense) but hopefully
		 * that limitation will go away soon.
		 */
		max_sense_len = sp->cmd_scblen -
		    (sizeof (struct scsi_arq_status) -
		    sizeof (struct scsi_extended_sense));
		if (max_sense_len > tgt->emul64_einj_sense_length) {
			max_sense_len = tgt->emul64_einj_sense_length;
		}

		/* for ARQ */
		arq->sts_rqpkt_reason = CMD_CMPLT;
		arq->sts_rqpkt_resid = 0;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Copy sense data */
		if (tgt->emul64_einj_sense_data != 0) {
			bcopy(tgt->emul64_einj_sense_data,
			    (uint8_t *)&arq->sts_sensedata,
			    max_sense_len);
		}
	}

	/* Return current error injection state */
	return (tgt->emul64_einj_state);
}
int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t	*tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == (intptr_t)NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle error injection request.  If error injection
	 * is requested we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/* Copy sense data */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed\n");
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}
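
/*
 * Sketch of a user-level request (hypothetical values; the ioctl command
 * EMUL64_ERROR_INJECT and the emul64_error_inj_data fields are the ones
 * consumed above):
 *
 *	struct emul64_error_inj_data req = { 0 };
 *	req.eccd_target = 0;
 *	req.eccd_lun = 0;
 *	req.eccd_inj_state = ERR_INJ_ENABLE_NODATA;
 *	req.eccd_sns_dlen = 0;
 *	(void) ioctl(fd, EMUL64_ERROR_INJECT, &req);
 *
 * When eccd_sns_dlen is nonzero, the sense bytes must immediately follow
 * the structure in the user buffer, since the second copyin above reads
 * from arg + sizeof (error_inj_req).
 */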
int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);
static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
		/*
		 * If error injection is configured to return with
		 * no data return now without handling the command.
		 * This is how normal check conditions work.
		 *
		 * If the error injection state is ERR_INJ_ENABLE
		 * (or if error injection is disabled) continue and
		 * handle the command.  This would be used for
		 * KEY_RECOVERABLE_ERROR type conditions.
		 */
		return;
	}

	switch (pkt->pkt_cdbp[0]) {
	case SCMD_START_STOP:
		(void) bsd_scsi_start_stop_unit(pkt);
		break;
	case SCMD_TEST_UNIT_READY:
		(void) bsd_scsi_test_unit_ready(pkt);
		break;
	case SCMD_REQUEST_SENSE:
		(void) bsd_scsi_request_sense(pkt);
		break;
	case SCMD_INQUIRY:
		(void) bsd_scsi_inquiry(pkt);
		break;
	case SCMD_FORMAT:
		(void) bsd_scsi_format(pkt);
		break;
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		(void) bsd_scsi_io(pkt);
		break;
	case SCMD_LOG_SENSE_G1:
		(void) bsd_scsi_log_sense(pkt);
		break;
	case SCMD_MODE_SENSE:
	case SCMD_MODE_SENSE_G1:
		(void) bsd_scsi_mode_sense(pkt);
		break;
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		(void) bsd_scsi_mode_select(pkt);
		break;
	case SCMD_READ_CAPACITY:
		(void) bsd_scsi_read_capacity(pkt);
		break;
	case SCMD_SVC_ACTION_IN_G4:
		if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
			(void) bsd_scsi_read_capacity_16(pkt);
		} else {
			cmn_err(CE_WARN, "emul64: unrecognized G4 service "
			    "action 0x%x", pkt->pkt_cdbp[1]);
		}
		break;
	case SCMD_RESERVE:
	case SCMD_RESERVE_G1:
		(void) bsd_scsi_reserve(pkt);
		break;
	case SCMD_RELEASE:
	case SCMD_RELEASE_G1:
		(void) bsd_scsi_release(pkt);
		break;
	case SCMD_REASSIGN_BLOCK:
		(void) bsd_scsi_reassign_block(pkt);
		break;
	case SCMD_READ_DEFECT_LIST:
		(void) bsd_scsi_read_defect_list(pkt);
		break;
	case SCMD_PRIN:
	case SCMD_PROUT:
	case SCMD_REPORT_LUNS:
		/* ASC 0x24 INVALID FIELD IN CDB */
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	default:
		cmn_err(CE_WARN, "emul64: unrecognized "
		    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	case SCMD_GET_CONFIGURATION:
	case 0x35:	/* SCMD_SYNCHRONIZE_CACHE */
		/* Don't complain */
		break;
	}
}
static void
emul64_pkt_comp(void *arg)
{
	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (!tgt) {
		pkt->pkt_reason = CMD_TIMEOUT;
		pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
		pkt->pkt_statistics = STAT_TIMEOUT;
	} else {
		pkt->pkt_reason = CMD_CMPLT;
		*pkt->pkt_scbp = STATUS_GOOD;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
		pkt->pkt_statistics = 0;
		emul64_handle_cmd(pkt);
	}
	scsi_hba_pkt_comp(pkt);
}
/*ARGSUSED*/
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (0);
}

/*ARGSUSED*/
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
	return (0);
}
static int
emul64_get_tgtrange(struct emul64 *emul64,
	intptr_t arg,
	emul64_tgt_t **tgtp,
	emul64_tgt_range_t *tgtr)
{
	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
		return (EFAULT);
	}
	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (*tgtp == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
		    tgtr->emul64_target, tgtr->emul64_lun,
		    ddi_get_instance(emul64->emul64_dip));
		return (ENXIO);
	}
	return (0);
}
/*ARGSUSED*/
static int
emul64_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
	cred_t *credp, int *rvalp)
{
	struct emul64		*emul64;
	int			instance;
	int			rv = 0;
	emul64_tgt_range_t	tgtr;
	emul64_tgt_t		*tgt;

	instance = MINOR2INST(getminor(dev));
	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
		    getminor(dev));
		return (ENXIO);
	}

	switch (cmd) {
	case EMUL64_WRITE_OFF:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_off(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_WRITE_ON:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_on(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_ZERO_RANGE:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			mutex_enter(&tgt->emul64_tgt_blk_lock);
			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
			mutex_exit(&tgt->emul64_tgt_blk_lock);
		}
		break;
	case EMUL64_ERROR_INJECT:
		rv = emul64_error_inject_req(emul64, arg);
		break;
	default:
		rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
		break;
	}

	return (rv);
}
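
/*
 * Sketch of a user-level caller of the range ioctls (hypothetical file
 * descriptor and values; the command names and emul64_tgt_range_t layout
 * are the ones used above):
 *
 *	emul64_tgt_range_t tr;
 *	tr.emul64_target = 0;
 *	tr.emul64_lun = 0;
 *	tr.emul64_blkrange.emul64_sb = 1024;	   starting block
 *	tr.emul64_blkrange.emul64_blkcnt = 2048;   block count
 *	(void) ioctl(fd, EMUL64_WRITE_OFF, &tr);   suppress writes to range
 *	(void) ioctl(fd, EMUL64_WRITE_ON, &tr);	   restore normal writes
 */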
static int
emul64_write_off(struct emul64 *emul64,
	emul64_tgt_t *tgt,
	emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	*nowrite;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_NONE) {
		/* Insert into list */
		*prev = nowrite;
		nowrite->emul64_nwnext = cur;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);
	if (overlap == O_NONE) {
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count++;
			mutex_exit(&emul64_stats_mutex);
		}
	} else {
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
		    PRIx64 " overlaps 0x%llx,0x%" PRIx64 "\n",
		    nowrite->emul64_blocked.emul64_sb,
		    nowrite->emul64_blocked.emul64_blkcnt,
		    cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		emul64_nowrite_free(nowrite);
		return (EINVAL);
	}

	return (0);
}
static int
emul64_write_on(struct emul64 *emul64,
	emul64_tgt_t *tgt,
	emul64_tgt_range_t *tgtr)
{
	size_t			blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
	emul64_nowrite_t	*cur;
	emul64_rng_overlap_t	overlap = O_NONE;
	emul64_nowrite_t	**prev = NULL;
	int			rv = 0;
	diskaddr_t		sb = tgtr->emul64_blkrange.emul64_sb;

	/* Find spot in list */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
	cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
	if (overlap == O_SAME) {
		/* Remove from list */
		*prev = cur->emul64_nwnext;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

	switch (overlap) {
	case O_NONE:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "range not found\n", sb, blkcnt);
		rv = ENXIO;
		break;
	case O_SAME:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		emul64_nowrite_free(cur);
		break;
	default:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64 "\n",
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}

	return (rv);
}
static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
	diskaddr_t sb,
	size_t blkcnt,
	emul64_rng_overlap_t *overlap,
	emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	**prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}
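
/*
 * Design note: emul64_find_nowrite hands back, through prevp, the address
 * of the link that points at the returned node.  That pointer-to-pointer
 * idiom lets emul64_write_off splice in with "*prev = nowrite" and lets
 * emul64_write_on unlink with "*prev = cur->emul64_nwnext", without either
 * caller special-casing the head of the list.
 */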
static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t	*nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *) range,
	    (void *) &nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}
static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
	kmem_free((void *) nw, sizeof (*nw));
}
emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{
	if (rng->emul64_sb >= sb + cnt)
		return (O_NONE);
	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
		return (O_NONE);
	if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
		return (O_SAME);
	if ((sb >= rng->emul64_sb) &&
	    ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
		return (O_SUBSET);
	}
	return (O_OVERLAP);
}
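
/*
 * Worked example of the classification above, assuming the O_SUBSET and
 * O_OVERLAP members exist alongside O_NONE and O_SAME: with a blocked
 * range rng = {sb=100, blkcnt=50} (blocks 100..149),
 *
 *	emul64_overlap(&rng, 150, 10)  -> O_NONE    (disjoint, above)
 *	emul64_overlap(&rng, 100, 50)  -> O_SAME    (identical range)
 *	emul64_overlap(&rng, 110, 20)  -> O_SUBSET  (fully contained)
 *	emul64_overlap(&rng,  90, 20)  -> O_OVERLAP (partial overlap)
 */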
#include <sys/varargs.h>

/*
 * Error logging, printing, and debug print routines
 */

/*PRINTFLIKE3*/
static void
emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
{
	char	buf[256];
	va_list	ap;

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	scsi_log(emul64 ? emul64->emul64_dip : NULL,
	    "emul64", level, "%s\n", buf);
}
#ifdef EMUL64DEBUG
static void
emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	static char	hex[] = "0123456789abcdef";
	struct emul64	*emul64 = ADDR2EMUL64(ap);
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	uint8_t		*cdb = pkt->pkt_cdbp;
	char		buf[256];
	char		*p;
	int		i;

	(void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
	    ddi_get_instance(emul64->emul64_dip),
	    ap->a_target, ap->a_lun);

	p = buf + strlen(buf);

	*p++ = '[';
	for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
		if (i != 0)
			*p++ = ' ';
		*p++ = hex[(*cdb >> 4) & 0x0f];
		*p++ = hex[*cdb & 0x0f];
	}
	*p++ = ']';
	*p++ = '\n';
	*p = '\0';

	cmn_err(CE_CONT, "%s", buf);
}
#endif	/* EMUL64DEBUG */