/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * pseudo scsi disk driver
 */
#include <sys/scsi/scsi.h>
#include <sys/sunddi.h>
#include <sys/taskq.h>
#include <sys/types.h>
#include <sys/emul64.h>
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>
/*
 * Mode sense/select page control
 */
#define	MODE_SENSE_PC_CURRENT		0
#define	MODE_SENSE_PC_CHANGEABLE	1
#define	MODE_SENSE_PC_DEFAULT		2
#define	MODE_SENSE_PC_SAVED		3
/*
 * Byte conversion macros
 */
#if defined(_BIG_ENDIAN)
#define	ushort_to_scsi_ushort(n)	(n)
#define	uint32_to_scsi_uint32(n)	(n)
#define	uint64_to_scsi_uint64(n)	(n)
#elif defined(_LITTLE_ENDIAN)
#define	ushort_to_scsi_ushort(n)	\
		((((n) & 0x00ff) << 8) | \
		(((n) & 0xff00) >> 8))
#define	uint32_to_scsi_uint32(n)	\
		((((n) & 0x000000ff) << 24) | \
		(((n) & 0x0000ff00) << 8) | \
		(((n) & 0x00ff0000) >> 8) | \
		(((n) & 0xff000000) >> 24))
#define	uint64_to_scsi_uint64(n)	\
		((((n) & 0x00000000000000ff) << 56) | \
		(((n) & 0x000000000000ff00) << 40) | \
		(((n) & 0x0000000000ff0000) << 24) | \
		(((n) & 0x00000000ff000000) << 8) | \
		(((n) & 0x000000ff00000000) >> 8) | \
		(((n) & 0x0000ff0000000000) >> 24) | \
		(((n) & 0x00ff000000000000) >> 40) | \
		(((n) & 0xff00000000000000) >> 56))
#else
#error "no _BIG_ENDIAN or _LITTLE_ENDIAN"
#endif
#define	uint_to_byte0(n)	((n) & 0xff)
#define	uint_to_byte1(n)	(((n)>>8) & 0xff)
#define	uint_to_byte2(n)	(((n)>>16) & 0xff)
#define	uint_to_byte3(n)	(((n)>>24) & 0xff)
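
/*
 * Example (illustrative): SCSI carries multi-byte fields big-endian, so on
 * a little-endian host ushort_to_scsi_ushort(0x1234) evaluates to 0x3412,
 * while the uint_to_byteN macros extract single bytes from host-order
 * values, e.g. uint_to_byte1(0x1234) == 0x12 and uint_to_byte0(0x1234) == 0x34.
 */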
/*
 * This structure maps a property name to the place to store its value.
 */
struct prop_map {
	char	*pm_name;	/* Name of the property. */
	int	*pm_value;	/* Place to store the value. */
};
static int emul64_debug_blklist = 0;
/*
 * Some interesting statistics.  These are protected by the
 * emul64_stats_mutex.  It would be nice to have an ioctl to print them out,
 * but we don't have the development time for that now.  You can at least
 * look at them with adb.
 */

int		emul64_collect_stats = 1;	/* Collect stats if non-zero */
kmutex_t	emul64_stats_mutex;		/* Protect these variables */
long		emul64_nowrite_count = 0;	/* # active nowrite ranges */
static uint64_t	emul64_skipped_io = 0;		/* Skipped I/O operations, */
						/* because of */
						/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_skipped_blk = 0;		/* Skipped blocks because of */
						/* EMUL64_WRITE_OFF. */
static uint64_t	emul64_io_ops = 0;		/* Total number of I/O */
						/* operations, including */
						/* skipped and actual. */
static uint64_t	emul64_io_blocks = 0;		/* Total number of blocks */
						/* involved in I/O operations. */
static uint64_t	emul64_nonzero = 0;		/* Number of non-zero data */
						/* blocks currently held in */
						/* memory. */
static uint64_t	emul64_max_list_length = 0;	/* Maximum size of a linked */
						/* list of non-zero blocks. */
uint64_t	emul64_taskq_max = 0;		/* emul64_scsi_start uses the */
						/* taskq mechanism to dispatch */
						/* work.  If the number of */
						/* entries in the task queue */
						/* exceeds the maximum for the */
						/* queue, a 1 second delay is */
						/* encountered in */
						/* taskq_ent_alloc.  This */
						/* counter counts the number */
						/* of times that this happens. */
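
/*
 * For example (an illustrative invocation, not part of the driver), a
 * counter such as emul64_io_ops can be read on a live system with mdb:
 *
 *	echo 'emul64_io_ops/E' | mdb -k
 */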
/*
 * Since emul64 does no physical I/O, operations that would normally be I/O
 * intensive become CPU bound.  An example of this is RAID 5
 * initialization.  When the kernel becomes CPU bound, it looks as if the
 * machine is hung.
 *
 * To avoid this problem, we provide a function, emul64_yield_check, that
 * does a delay from time to time to yield up the CPU.  The following
 * variables are tunables for this algorithm.
 *
 *	emul64_num_delay_called	Number of times we called delay.  This is
 *				not really a tunable.  Rather it is a
 *				counter that provides useful information
 *				for adjusting the tunables.
 *	emul64_yield_length	Number of microseconds to yield the CPU.
 *	emul64_yield_period	Number of I/O operations between yields.
 *	emul64_yield_enable	emul64 will yield the CPU, only if this
 *				variable contains a non-zero value.  This
 *				allows the yield functionality to be turned
 *				off for experimentation purposes.
 *
 * The value of 1000 for emul64_yield_period has been determined by
 * experience with running the tests.
 */
static uint64_t		emul64_num_delay_called = 0;
static int		emul64_yield_length = 1000;
static int		emul64_yield_period = 1000;
static int		emul64_yield_enable = 1;
static kmutex_t		emul64_yield_mutex;
static kcondvar_t	emul64_yield_cv;
/*
 * This array establishes a set of tunable variables that can be set by
 * defining properties in the emul64.conf file.
 */
struct prop_map emul64_properties[] = {
	"emul64_collect_stats",		&emul64_collect_stats,
	"emul64_yield_length",		&emul64_yield_length,
	"emul64_yield_period",		&emul64_yield_period,
	"emul64_yield_enable",		&emul64_yield_enable,
	"emul64_max_task",		&emul64_max_task,
	"emul64_task_nthreads",		&emul64_task_nthreads
};
static unsigned char *emul64_zeros = NULL;	/* Block of 0s for comparison */
extern void emul64_check_cond(struct scsi_pkt *pkt, uchar_t key,
    uchar_t asc, uchar_t ascq);

/* ncyl=250000 acyl=2 nhead=24 nsect=357 */
uint_t dkg_rpm = 3600;
static int bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *);
static int bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_format(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_cache(struct scsi_pkt *);
static int bsd_readblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
    int, unsigned char *);
static int bsd_writeblks(struct emul64 *, ushort_t, ushort_t, diskaddr_t,
    int, unsigned char *);
emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);
static blklist_t *bsd_findblk(emul64_tgt_t *, diskaddr_t, avl_index_t *);
static void bsd_allocblk(emul64_tgt_t *, diskaddr_t, caddr_t, avl_index_t);
static void bsd_freeblk(emul64_tgt_t *, blklist_t *);
static void emul64_yield_check();
static emul64_rng_overlap_t bsd_tgt_overlap(emul64_tgt_t *, diskaddr_t, int);

char *emul64_name = "emul64";
/*
 * Initialize globals in this file.
 */
void
emul64_bsd_init()
{
	emul64_zeros = kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	mutex_init(&emul64_stats_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&emul64_yield_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&emul64_yield_cv, NULL, CV_DRIVER, NULL);
}
/*
 * Clean up globals in this file.
 */
void
emul64_bsd_fini()
{
	cv_destroy(&emul64_yield_cv);
	mutex_destroy(&emul64_yield_mutex);
	mutex_destroy(&emul64_stats_mutex);
	if (emul64_zeros != NULL) {
		kmem_free(emul64_zeros, DEV_BSIZE);
		emul64_zeros = NULL;
	}
}
/*
 * Attempt to get the values of the properties that are specified in the
 * emul64_properties array.  If the property exists, copy its value to the
 * specified location.  All the properties have been assigned default
 * values in this driver, so if we cannot get the property that is not a
 * problem.
 */
void
emul64_bsd_get_props(dev_info_t *dip)
{
	uint_t		count;
	int		*properties;
	struct prop_map	*pmp;
	int		i;

	for (pmp = emul64_properties, i = 0;
	    i < sizeof (emul64_properties) / sizeof (struct prop_map);
	    i++, pmp++) {
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, pmp->pm_name, &properties,
		    &count) == DDI_PROP_SUCCESS) {
			if (count >= 1) {
				*pmp->pm_value = *properties;
			}
			ddi_prop_free((void *) properties);
		}
	}
}
int
emul64_bsd_blkcompare(const void *a1, const void *b1)
{
	blklist_t	*a = (blklist_t *)a1;
	blklist_t	*b = (blklist_t *)b1;

	if (a->bl_blkno < b->bl_blkno)
		return (-1);
	if (a->bl_blkno == b->bl_blkno)
		return (0);
	return (1);
}
int
bsd_scsi_start_stop_unit(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_test_unit_ready(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_request_sense(struct scsi_pkt *pkt)
{
	return (0);
}
static int
bsd_scsi_inq_page0(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);

	if (sp->cmd_count < 6) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page0: size %d required\n",
		    emul64_name, 6);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = 6 - 3;	/* length */
	sp->cmd_addr[4] = 0;		/* 1st page */
	sp->cmd_addr[5] = 0x83;		/* 2nd page */

	pkt->pkt_resid = sp->cmd_count - 6;
	return (0);
}
static int
bsd_scsi_inq_page83(struct scsi_pkt *pkt, uchar_t pqdtype)
{
	struct emul64		*emul64 = PKT2EMUL64(pkt);
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			instance = ddi_get_instance(emul64->emul64_dip);

	if (sp->cmd_count < 22) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inq_page83: size %d required\n",
		    emul64_name, 22);
		return (EIO);
	}

	sp->cmd_addr[0] = pqdtype;	/* periph qual., dtype */
	sp->cmd_addr[1] = 0x83;		/* page code */
	sp->cmd_addr[2] = 0;		/* reserved */
	sp->cmd_addr[3] = (22 - 8) + 4;	/* length */

	sp->cmd_addr[4] = 1;		/* code set - binary */
	sp->cmd_addr[5] = 3;		/* association and device ID type 3 */
	sp->cmd_addr[6] = 0;		/* reserved */
	sp->cmd_addr[7] = 22 - 8;	/* ID length */

	sp->cmd_addr[8] = 0xde;		/* @8: identifier, byte 0 */
	sp->cmd_addr[9] = 0xca;
	sp->cmd_addr[10] = 0xde;
	sp->cmd_addr[11] = 0x80;

	sp->cmd_addr[12] = 0xba;
	sp->cmd_addr[13] = 0xbe;
	sp->cmd_addr[14] = 0xab;
	sp->cmd_addr[15] = 0xba;

	/*
	 * Instances seem to be assigned sequentially, so it is unlikely
	 * that we will have more than 65535 of them.
	 */
	sp->cmd_addr[16] = uint_to_byte1(instance);
	sp->cmd_addr[17] = uint_to_byte0(instance);
	sp->cmd_addr[18] = uint_to_byte1(TGT(sp));
	sp->cmd_addr[19] = uint_to_byte0(TGT(sp));
	sp->cmd_addr[20] = uint_to_byte1(LUN(sp));
	sp->cmd_addr[21] = uint_to_byte0(LUN(sp));

	pkt->pkt_resid = sp->cmd_count - 22;
	return (0);
}
int
bsd_scsi_inquiry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	emul64_tgt_t		*tgt;
	uchar_t			pqdtype;
	struct scsi_inquiry	inq;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	if (sp->cmd_count < sizeof (inq)) {
		cmn_err(CE_CONT, "%s: bsd_scsi_inquiry: size %d required\n",
		    emul64_name, (int)sizeof (inq));
		return (EIO);
	}

	if (cdb->cdb_opaque[1] & 0xfc) {
		cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: 0x%x",
		    emul64_name, cdb->cdb_opaque[1]);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
		return (0);
	}

	pqdtype = tgt->emul64_tgt_dtype;
	if (cdb->cdb_opaque[1] & 0x1) {
		switch (cdb->cdb_opaque[2]) {
		case 0x00:
			return (bsd_scsi_inq_page0(pkt, pqdtype));
		case 0x83:
			return (bsd_scsi_inq_page83(pkt, pqdtype));
		default:
			cmn_err(CE_WARN, "%s: bsd_scsi_inquiry: "
			    "unsupported page 0x%x",
			    emul64_name, cdb->cdb_opaque[2]);
			return (0);
		}
	}

	/* set up the inquiry data we return */
	(void) bzero((void *)&inq, sizeof (inq));

	inq.inq_dtype = pqdtype;
	inq.inq_len = sizeof (inq) - 4;

	(void) bcopy(tgt->emul64_tgt_inq, inq.inq_vid,
	    sizeof (tgt->emul64_tgt_inq));
	(void) bcopy("1", inq.inq_revision, 2);
	(void) bcopy((void *)&inq, sp->cmd_addr, sizeof (inq));

	pkt->pkt_resid = sp->cmd_count - sizeof (inq);
	return (0);
}
int
bsd_scsi_format(struct scsi_pkt *pkt)
{
	return (0);
}
int
bsd_scsi_io(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	diskaddr_t		lblkno;
	int			nblks;

	switch (cdb->scc_cmd) {
	case SCMD_READ:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE:
		lblkno = (uint32_t)GETG0ADDR(cdb);
		nblks = GETG0COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g0 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G1:
		lblkno = (uint32_t)GETG1ADDR(cdb);
		nblks = GETG1COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g1 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_READ_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_readblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "read g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	case SCMD_WRITE_G4:
		lblkno = GETG4ADDR(cdb);
		lblkno <<= 32;
		lblkno |= (uint32_t)GETG4ADDRTL(cdb);
		nblks = GETG4COUNT(cdb);
		pkt->pkt_resid = bsd_writeblks(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun,
		    lblkno, nblks, sp->cmd_addr);
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_io: "
			    "write g4 blk=%lld (0x%llx) nblks=%d\n",
			    emul64_name, lblkno, lblkno, nblks);
		}
		break;
	default:
		cmn_err(CE_WARN, "%s: bsd_scsi_io: unhandled I/O: 0x%x",
		    emul64_name, cdb->scc_cmd);
		break;
	}

	if (pkt->pkt_resid != 0)
		cmn_err(CE_WARN, "%s: bsd_scsi_io: "
		    "pkt_resid: 0x%lx, lblkno %lld, nblks %d",
		    emul64_name, pkt->pkt_resid, lblkno, nblks);

	return (0);
}
int
bsd_scsi_log_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			page_code;

	if (sp->cmd_count < 9) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense size %d required\n",
		    emul64_name, 9);
		return (EIO);
	}

	page_code = cdb->cdb_opaque[2] & 0x3f;
	if (page_code) {
		cmn_err(CE_CONT, "%s: bsd_scsi_log_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		emul64_check_cond(pkt, 0x5, 0x24, 0x0); /* inv. fld in cdb */
		return (0);
	}

	sp->cmd_addr[0] = 0;		/* page code */
	sp->cmd_addr[1] = 0;		/* reserved */
	sp->cmd_addr[2] = 0;		/* MSB of page length */
	sp->cmd_addr[3] = 8 - 3;	/* LSB of page length */

	sp->cmd_addr[4] = 0;		/* MSB of parameter code */
	sp->cmd_addr[5] = 0;		/* LSB of parameter code */
	sp->cmd_addr[6] = 0;		/* parameter control byte */
	sp->cmd_addr[7] = 4 - 3;	/* parameter length */
	sp->cmd_addr[8] = 0x0;		/* parameter value */

	pkt->pkt_resid = sp->cmd_count - 9;
	return (0);
}
int
bsd_scsi_mode_sense(struct scsi_pkt *pkt)
{
	union scsi_cdb	*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int		page_control;
	int		page_code;
	int		rval = 0;

	switch (cdb->scc_cmd) {
	case SCMD_MODE_SENSE:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG0COUNT(cdb));
		}
		break;
	case SCMD_MODE_SENSE_G1:
		page_code = cdb->cdb_opaque[2] & 0x3f;
		page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;
		if (emul64debug) {
			cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
			    "page=0x%x control=0x%x nbytes=%d\n",
			    emul64_name, page_code, page_control,
			    GETG1COUNT(cdb));
		}
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "cmd 0x%x not supported\n", emul64_name, cdb->scc_cmd);
		return (EIO);
	}

	switch (page_code) {
	case DAD_MODE_GEOMETRY:
		rval = bsd_mode_sense_dad_mode_geometry(pkt);
		break;
	case DAD_MODE_ERR_RECOV:
		rval = bsd_mode_sense_dad_mode_err_recov(pkt);
		break;
	case MODEPAGE_DISCO_RECO:
		rval = bsd_mode_sense_modepage_disco_reco(pkt);
		break;
	case DAD_MODE_FORMAT:
		rval = bsd_mode_sense_dad_mode_format(pkt);
		break;
	case DAD_MODE_CACHE:
		rval = bsd_mode_sense_dad_mode_cache(pkt);
		break;
	default:
		cmn_err(CE_CONT, "%s: bsd_scsi_mode_sense: "
		    "page 0x%x not supported\n", emul64_name, page_code);
		rval = EIO;
		break;
	}

	return (rval);
}
static int
bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_geometry	page4;
	int			ncyl;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page4))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_geometry: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page4)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page4, sizeof (page4));

	header.length = sizeof (header) + sizeof (page4) - 1;
	header.bdesc_length = 0;

	page4.mode_page.code = DAD_MODE_GEOMETRY;
	page4.mode_page.ps = 1;
	page4.mode_page.length = sizeof (page4) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		ncyl = tgt->emul64_tgt_ncyls;
		page4.cyl_ub = uint_to_byte2(ncyl);
		page4.cyl_mb = uint_to_byte1(ncyl);
		page4.cyl_lb = uint_to_byte0(ncyl);
		page4.heads = uint_to_byte0(tgt->emul64_tgt_nheads);
		page4.rpm = ushort_to_scsi_ushort(dkg_rpm);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		page4.cyl_ub = 0xff;
		page4.cyl_mb = 0xff;
		page4.cyl_lb = 0xff;
		page4.heads = 0xff;
		page4.rpm = 0xffff;
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page4, addr + sizeof (header), sizeof (page4));

	pkt->pkt_resid = sp->cmd_count - sizeof (page4) - sizeof (header);

	return (rval);
}
static int
bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_err_recov	page1;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page1))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_err_recov: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page1)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page1, sizeof (page1));

	header.length = sizeof (header) + sizeof (page1) - 1;
	header.bdesc_length = 0;

	page1.mode_page.code = DAD_MODE_ERR_RECOV;
	page1.mode_page.ps = 1;
	page1.mode_page.length = sizeof (page1) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page1, addr + sizeof (header), sizeof (page1));

	pkt->pkt_resid = sp->cmd_count - sizeof (page1) - sizeof (header);

	return (rval);
}
static int
bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	int			rval = 0;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_disco_reco	page2;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page2))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_modepage_disco_reco: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page2)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page2, sizeof (page2));

	header.length = sizeof (header) + sizeof (page2) - 1;
	header.bdesc_length = 0;

	page2.mode_page.code = MODEPAGE_DISCO_RECO;
	page2.mode_page.ps = 1;
	page2.mode_page.length = sizeof (page2) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page2, addr + sizeof (header), sizeof (page2));

	pkt->pkt_resid = sp->cmd_count - sizeof (page2) - sizeof (header);

	return (rval);
}
static int
bsd_mode_sense_dad_mode_format(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	emul64_tgt_t		*tgt;
	int			page_control;
	struct mode_header	header;
	struct mode_format	page3;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page3))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_format: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page3)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page3, sizeof (page3));

	header.length = sizeof (header) + sizeof (page3) - 1;
	header.bdesc_length = 0;

	page3.mode_page.code = DAD_MODE_FORMAT;
	page3.mode_page.ps = 1;
	page3.mode_page.length = sizeof (page3) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		page3.data_bytes_sect = ushort_to_scsi_ushort(DEV_BSIZE);
		page3.interleave = ushort_to_scsi_ushort(1);
		EMUL64_MUTEX_ENTER(sp->cmd_emul64);
		tgt = find_tgt(sp->cmd_emul64,
		    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
		EMUL64_MUTEX_EXIT(sp->cmd_emul64);
		page3.sect_track = ushort_to_scsi_ushort(tgt->emul64_tgt_nsect);
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page3, addr + sizeof (header), sizeof (page3));

	pkt->pkt_resid = sp->cmd_count - sizeof (page3) - sizeof (header);

	return (rval);
}
static int
bsd_mode_sense_dad_mode_cache(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	union scsi_cdb		*cdb = (union scsi_cdb *)pkt->pkt_cdbp;
	uchar_t			*addr = (uchar_t *)sp->cmd_addr;
	int			page_control;
	struct mode_header	header;
	struct mode_cache	page8;
	int			rval = 0;

	page_control = (cdb->cdb_opaque[2] >> 6) & 0x03;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "pc=%d n=%d\n", emul64_name, page_control, sp->cmd_count);
	}

	if (sp->cmd_count < (sizeof (header) + sizeof (page8))) {
		cmn_err(CE_CONT, "%s: bsd_mode_sense_dad_mode_cache: "
		    "size %d required\n",
		    emul64_name, (int)(sizeof (header) + sizeof (page8)));
		return (EIO);
	}

	(void) bzero(&header, sizeof (header));
	(void) bzero(&page8, sizeof (page8));

	header.length = sizeof (header) + sizeof (page8) - 1;
	header.bdesc_length = 0;

	page8.mode_page.code = DAD_MODE_CACHE;
	page8.mode_page.ps = 1;
	page8.mode_page.length = sizeof (page8) - sizeof (struct mode_page);

	switch (page_control) {
	case MODE_SENSE_PC_CURRENT:
	case MODE_SENSE_PC_DEFAULT:
	case MODE_SENSE_PC_SAVED:
		break;
	case MODE_SENSE_PC_CHANGEABLE:
		break;
	}

	(void) bcopy(&header, addr, sizeof (header));
	(void) bcopy(&page8, addr + sizeof (header), sizeof (page8));

	pkt->pkt_resid = sp->cmd_count - sizeof (page8) - sizeof (header);

	return (rval);
}
int
bsd_scsi_mode_select(struct scsi_pkt *pkt)
{
	return (0);
}
int
bsd_scsi_read_capacity_8(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity	cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (tgt->emul64_tgt_sectors > 0xffffffff)
		cap.capacity = 0xffffffff;
	else
		cap.capacity =
		    uint32_to_scsi_uint32(tgt->emul64_tgt_sectors);
	cap.lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity));
	return (rval);
}
int
bsd_scsi_read_capacity_16(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_capacity_16	cap;
	int			rval = 0;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	cap.sc_capacity = uint64_to_scsi_uint64(tgt->emul64_tgt_sectors);
	cap.sc_lbasize = uint32_to_scsi_uint32((uint_t)DEV_BSIZE);
	bzero(&cap.sc_rsvd1[0], sizeof (cap.sc_rsvd1));

	pkt->pkt_resid = sp->cmd_count - sizeof (struct scsi_capacity_16);

	(void) bcopy(&cap, (caddr_t)sp->cmd_addr,
	    sizeof (struct scsi_capacity_16));
	return (rval);
}
int
bsd_scsi_read_capacity(struct scsi_pkt *pkt)
{
	return (bsd_scsi_read_capacity_8(pkt));
}
int
bsd_scsi_reserve(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_release(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_read_defect_list(struct scsi_pkt *pkt)
{
	return (0);
}

int
bsd_scsi_reassign_block(struct scsi_pkt *pkt)
{
	return (0);
}
static int
bsd_readblks(struct emul64 *emul64, ushort_t target, ushort_t lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t		*tgt;
	blklist_t		*blk;
	emul64_rng_overlap_t	overlap;
	int			i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_readblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, target, lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, target, lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_readblks: no target for %d,%d\n",
		    emul64_name, target, lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);

	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
	case O_OVERLAP:
		cmn_err(CE_WARN, "%s: bsd_readblks: "
		    "read to blocked area %lld,%d\n",
		    emul64_name, blkno, nblks);
		rw_exit(&tgt->emul64_tgt_nw_lock);
		goto errout;
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if (emul64_debug_blklist)
			cmn_err(CE_CONT, "%s: bsd_readblks: "
			    "%d of %d: blkno %lld\n",
			    emul64_name, i+1, nblks, blkno);
		if (blkno > tgt->emul64_tgt_sectors)
			break;
		blk = bsd_findblk(tgt, blkno, NULL);
		if (blk) {
			(void) bcopy(blk->bl_data, bufaddr, DEV_BSIZE);
		} else {
			(void) bzero(bufaddr, DEV_BSIZE);
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);

errout:
	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}
static int
bsd_writeblks(struct emul64 *emul64, ushort_t target, ushort_t lun,
    diskaddr_t blkno, int nblks, unsigned char *bufaddr)
{
	emul64_tgt_t		*tgt;
	blklist_t		*blk;
	emul64_rng_overlap_t	overlap;
	avl_index_t		where;
	int			i = 0;

	if (emul64debug) {
		cmn_err(CE_CONT, "%s: bsd_writeblks: "
		    "<%d,%d> blk %llu (0x%llx) nblks %d\n",
		    emul64_name, target, lun, blkno, blkno, nblks);
	}

	emul64_yield_check();

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, target, lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (tgt == NULL) {
		cmn_err(CE_WARN, "%s: bsd_writeblks: no target for %d,%d\n",
		    emul64_name, target, lun);
		goto unlocked_out;
	}

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_io_ops++;
		emul64_io_blocks += nblks;
		mutex_exit(&emul64_stats_mutex);
	}
	mutex_enter(&tgt->emul64_tgt_blk_lock);

	/*
	 * Keep the ioctls from changing the nowrite list for the duration
	 * of this I/O by grabbing emul64_tgt_nw_lock.  This will keep the
	 * results from our call to bsd_tgt_overlap from changing while we
	 * do the I/O.
	 */
	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	switch (overlap) {
	case O_SAME:
	case O_SUBSET:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_skipped_io++;
			emul64_skipped_blk += nblks;
			mutex_exit(&emul64_stats_mutex);
		}
		rw_exit(&tgt->emul64_tgt_nw_lock);
		mutex_exit(&tgt->emul64_tgt_blk_lock);
		return (0);
	case O_OVERLAP:
	case O_NONE:
		break;
	}
	for (i = 0; i < nblks; i++) {
		if ((overlap == O_NONE) ||
		    (bsd_tgt_overlap(tgt, blkno, 1) == O_NONE)) {
			/*
			 * If there was no overlap for the entire I/O range
			 * or if there is no overlap for this particular
			 * block, then we need to do the write.
			 */
			if (emul64_debug_blklist)
				cmn_err(CE_CONT, "%s: bsd_writeblks: "
				    "%d of %d: blkno %lld\n",
				    emul64_name, i+1, nblks, blkno);
			if (blkno > tgt->emul64_tgt_sectors) {
				cmn_err(CE_WARN, "%s: bsd_writeblks: "
				    "blkno %lld, tgt_sectors %lld\n",
				    emul64_name, blkno,
				    tgt->emul64_tgt_sectors);
				break;
			}

			blk = bsd_findblk(tgt, blkno, &where);
			if (bcmp(bufaddr, emul64_zeros, DEV_BSIZE) == 0) {
				if (blk) {
					bsd_freeblk(tgt, blk);
				}
			} else {
				if (blk) {
					(void) bcopy(bufaddr, blk->bl_data,
					    DEV_BSIZE);
				} else {
					bsd_allocblk(tgt, blkno,
					    (caddr_t)bufaddr, where);
				}
			}
		}
		blkno++;
		bufaddr += DEV_BSIZE;
	}

	/*
	 * Now that we're done with our I/O, allow the ioctls to change the
	 * nowrite list.
	 */
	rw_exit(&tgt->emul64_tgt_nw_lock);

	mutex_exit(&tgt->emul64_tgt_blk_lock);

unlocked_out:
	return ((nblks - i) * DEV_BSIZE);
}
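
/*
 * Note that the block store is sparse: bsd_writeblks frees any existing
 * copy of a block that is being overwritten with all zeros instead of
 * storing it, and bsd_readblks returns a zero-filled buffer for any block
 * that has no entry in the AVL tree.  Only non-zero data ever consumes
 * memory.
 */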
emul64_tgt_t *
find_tgt(struct emul64 *emul64, ushort_t target, ushort_t lun)
{
	emul64_tgt_t	*tgt;

	tgt = emul64->emul64_tgt;
	while (tgt) {
		if (tgt->emul64_tgt_saddr.a_target == target &&
		    tgt->emul64_tgt_saddr.a_lun == lun) {
			break;
		}
		tgt = tgt->emul64_tgt_next;
	}
	return (tgt);
}
/*
 * Free all blocks that are part of the specified range.
 */
int
bsd_freeblkrange(emul64_tgt_t *tgt, emul64_range_t *range)
{
	blklist_t	*blk;
	blklist_t	*nextblk;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));
	for (blk = (blklist_t *)avl_first(&tgt->emul64_tgt_data);
	    blk != NULL;
	    blk = nextblk) {
		/*
		 * We need to get the next block pointer now, because blk
		 * will be freed inside the if statement.
		 */
		nextblk = AVL_NEXT(&tgt->emul64_tgt_data, blk);

		if (emul64_overlap(range, blk->bl_blkno, (size_t)1) != O_NONE) {
			bsd_freeblk(tgt, blk);
		}
	}
	return (0);
}
static blklist_t *
bsd_findblk(emul64_tgt_t *tgt, diskaddr_t blkno, avl_index_t *where)
{
	blklist_t	*blk;
	blklist_t	search;

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	search.bl_blkno = blkno;
	blk = (blklist_t *)avl_find(&tgt->emul64_tgt_data, &search, where);
	return (blk);
}
static void
bsd_allocblk(emul64_tgt_t *tgt,
    diskaddr_t blkno,
    caddr_t data,
    avl_index_t where)
{
	blklist_t	*blk;

	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_allocblk: %llu\n",
		    emul64_name, blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	blk = (blklist_t *)kmem_zalloc(sizeof (blklist_t), KM_SLEEP);
	blk->bl_data = (uchar_t *)kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	blk->bl_blkno = blkno;
	(void) bcopy(data, blk->bl_data, DEV_BSIZE);
	avl_insert(&tgt->emul64_tgt_data, (void *) blk, where);

	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero++;
		tgt->emul64_list_length++;
		if (tgt->emul64_list_length > emul64_max_list_length) {
			emul64_max_list_length = tgt->emul64_list_length;
		}
		mutex_exit(&emul64_stats_mutex);
	}
}
static void
bsd_freeblk(emul64_tgt_t *tgt, blklist_t *blk)
{
	if (emul64_debug_blklist)
		cmn_err(CE_CONT, "%s: bsd_freeblk: <%d,%d> blk=%lld\n",
		    emul64_name, tgt->emul64_tgt_saddr.a_target,
		    tgt->emul64_tgt_saddr.a_lun, blk->bl_blkno);

	ASSERT(mutex_owned(&tgt->emul64_tgt_blk_lock));

	avl_remove(&tgt->emul64_tgt_data, (void *) blk);
	if (emul64_collect_stats) {
		mutex_enter(&emul64_stats_mutex);
		emul64_nonzero--;
		tgt->emul64_list_length--;
		mutex_exit(&emul64_stats_mutex);
	}
	kmem_free(blk->bl_data, DEV_BSIZE);
	kmem_free(blk, sizeof (blklist_t));
}
/*
 * Look for overlap between a nowrite range and a block range.
 *
 * NOTE:  Callers of this function must hold the tgt->emul64_tgt_nw_lock
 *	  lock.  For the purposes of this function, a reader lock is
 *	  sufficient.
 */
static emul64_rng_overlap_t
bsd_tgt_overlap(emul64_tgt_t *tgt, diskaddr_t blkno, int count)
{
	emul64_nowrite_t	*nw;
	emul64_rng_overlap_t	rv = O_NONE;

	for (nw = tgt->emul64_tgt_nowrite;
	    (nw != NULL) && (rv == O_NONE);
	    nw = nw->emul64_nwnext) {
		rv = emul64_overlap(&nw->emul64_blocked, blkno, (size_t)count);
	}
	return (rv);
}
/*
 * Operations that do a lot of I/O, such as RAID 5 initializations, result
 * in a CPU bound kernel when the device is an emul64 device.  This makes
 * the machine look hung.  To avoid this problem, give up the CPU from time
 * to time.
 */
static void
emul64_yield_check()
{
	static uint_t	emul64_io_count = 0;	/* # I/Os since last wait */
	static uint_t	emul64_waiting = FALSE;	/* TRUE -> a thread is in */
						/*   cv_timedwait. */
	clock_t		ticks;

	if (emul64_yield_enable == 0)
		return;

	mutex_enter(&emul64_yield_mutex);

	if (emul64_waiting == TRUE) {
		/*
		 * Another thread has already started the timer.  We'll
		 * just wait here until their time expires, and they
		 * broadcast to us.  When they do that, we'll return and
		 * let our caller do more I/O.
		 */
		cv_wait(&emul64_yield_cv, &emul64_yield_mutex);
	} else if (emul64_io_count++ > emul64_yield_period) {
		/*
		 * Set emul64_waiting to let other threads know that we
		 * have started the timer.
		 */
		emul64_waiting = TRUE;
		emul64_num_delay_called++;
		ticks = drv_usectohz(emul64_yield_length);
		if (ticks == 0)
			ticks = 1;
		(void) cv_reltimedwait(&emul64_yield_cv, &emul64_yield_mutex,
		    ticks, TR_CLOCK_TICK);
		emul64_io_count = 0;
		emul64_waiting = FALSE;

		/* Broadcast in case others are waiting. */
		cv_broadcast(&emul64_yield_cv);
	}

	mutex_exit(&emul64_yield_mutex);
}
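
/*
 * With the default tunables above, roughly every emul64_yield_period
 * (1000) I/O operations one thread sleeps for about emul64_yield_length
 * (1000) microseconds while any other I/O threads block on
 * emul64_yield_cv, giving the rest of the system a chance to run.
 */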