/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 *
 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                when the allocation length of CDB is smaller
 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/hw-version.h"
#include "qemu/memalign.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "qom/object.h"

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
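
/*
 * OBJECT_DECLARE_TYPE() is QOM boilerplate: it declares the SCSIDiskState and
 * SCSIDiskClass typedefs along with the usual SCSI_DISK_BASE() instance and
 * class checker macros for TYPE_SCSI_DISK_BASE.
 */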

struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    void            (*update_sense)(SCSIRequest *r);
};

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes.  */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    uint32_t quirks;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
};
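
/*
 * rotation_rate is surfaced to the guest through the Block Device
 * Characteristics VPD page (0xb1) built in scsi_disk_emulate_vpd_page().
 */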

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
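
/*
 * The request's bounce buffer is reused in SCSI_DMA_BUF_SIZE chunks:
 * scsi_do_read() and scsi_write_complete_noio() call scsi_init_iovec()
 * again for each chunk until sector_count reaches zero.
 */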

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);

    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
    } else if (!req->retry) {
        uint32_t len = r->iov.iov_len;
        qemu_put_be32s(f, &len);
        qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);

    scsi_init_iovec(r, r->buflen);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
    } else if (!r->req.retry) {
        uint32_t len;
        qemu_get_be32s(f, &len);
        r->iov.iov_len = len;
        assert(r->iov.iov_len <= r->buflen);
        qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status.  */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    default:
        return false;
    }
}
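
/*
 * FUA is bit 3 of CDB byte 1 for READ/WRITE (10/12/16), hence the "& 8" test
 * above; WRITE AND VERIFY is treated as if FUA were set because the data has
 * to reach the medium before it can be verified.
 */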

static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

/* Called with AioContext lock held */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
}

static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}
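
/*
 * Reads thus take one of two paths: requests that carry a scatter/gather list
 * go straight to dma_blk_io(), while the rest bounce through the request's own
 * buffer in SCSI_DMA_BUF_SIZE chunks via sdc->dma_readv().
 */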

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}
612 static int scsi_disk_emulate_vpd_page(SCSIRequest
*req
, uint8_t *outbuf
)
614 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
615 uint8_t page_code
= req
->cmd
.buf
[2];
616 int start
, buflen
= 0;
618 outbuf
[buflen
++] = s
->qdev
.type
& 0x1f;
619 outbuf
[buflen
++] = page_code
;
620 outbuf
[buflen
++] = 0x00;
621 outbuf
[buflen
++] = 0x00;
625 case 0x00: /* Supported page codes, mandatory */
627 trace_scsi_disk_emulate_vpd_page_00(req
->cmd
.xfer
);
628 outbuf
[buflen
++] = 0x00; /* list of supported pages (this page) */
630 outbuf
[buflen
++] = 0x80; /* unit serial number */
632 outbuf
[buflen
++] = 0x83; /* device identification */
633 if (s
->qdev
.type
== TYPE_DISK
) {
634 outbuf
[buflen
++] = 0xb0; /* block limits */
635 outbuf
[buflen
++] = 0xb1; /* block device characteristics */
636 outbuf
[buflen
++] = 0xb2; /* thin provisioning */
640 case 0x80: /* Device serial number, optional */
645 trace_scsi_disk_emulate_vpd_page_80_not_supported();
649 l
= strlen(s
->serial
);
654 trace_scsi_disk_emulate_vpd_page_80(req
->cmd
.xfer
);
655 memcpy(outbuf
+ buflen
, s
->serial
, l
);
660 case 0x83: /* Device identification page, mandatory */
662 int id_len
= s
->device_id
? MIN(strlen(s
->device_id
), 255 - 8) : 0;
664 trace_scsi_disk_emulate_vpd_page_83(req
->cmd
.xfer
);
667 outbuf
[buflen
++] = 0x2; /* ASCII */
668 outbuf
[buflen
++] = 0; /* not officially assigned */
669 outbuf
[buflen
++] = 0; /* reserved */
670 outbuf
[buflen
++] = id_len
; /* length of data following */
671 memcpy(outbuf
+ buflen
, s
->device_id
, id_len
);
676 outbuf
[buflen
++] = 0x1; /* Binary */
677 outbuf
[buflen
++] = 0x3; /* NAA */
678 outbuf
[buflen
++] = 0; /* reserved */
679 outbuf
[buflen
++] = 8;
680 stq_be_p(&outbuf
[buflen
], s
->qdev
.wwn
);
684 if (s
->qdev
.port_wwn
) {
685 outbuf
[buflen
++] = 0x61; /* SAS / Binary */
686 outbuf
[buflen
++] = 0x93; /* PIV / Target port / NAA */
687 outbuf
[buflen
++] = 0; /* reserved */
688 outbuf
[buflen
++] = 8;
689 stq_be_p(&outbuf
[buflen
], s
->qdev
.port_wwn
);
694 outbuf
[buflen
++] = 0x61; /* SAS / Binary */
696 /* PIV/Target port/relative target port */
697 outbuf
[buflen
++] = 0x94;
699 outbuf
[buflen
++] = 0; /* reserved */
700 outbuf
[buflen
++] = 4;
701 stw_be_p(&outbuf
[buflen
+ 2], s
->port_index
);
706 case 0xb0: /* block limits */
708 SCSIBlockLimits bl
= {};
710 if (s
->qdev
.type
== TYPE_ROM
) {
711 trace_scsi_disk_emulate_vpd_page_b0_not_supported();
716 s
->qdev
.conf
.discard_granularity
/ s
->qdev
.blocksize
;
718 s
->qdev
.conf
.min_io_size
/ s
->qdev
.blocksize
;
720 s
->qdev
.conf
.opt_io_size
/ s
->qdev
.blocksize
;
721 bl
.max_unmap_sectors
=
722 s
->max_unmap_size
/ s
->qdev
.blocksize
;
724 s
->max_io_size
/ s
->qdev
.blocksize
;
725 /* 255 descriptors fit in 4 KiB with an 8-byte header */
726 bl
.max_unmap_descr
= 255;
728 if (s
->qdev
.type
== TYPE_DISK
) {
729 int max_transfer_blk
= blk_get_max_transfer(s
->qdev
.conf
.blk
);
730 int max_io_sectors_blk
=
731 max_transfer_blk
/ s
->qdev
.blocksize
;
734 MIN_NON_ZERO(max_io_sectors_blk
, bl
.max_io_sectors
);
736 buflen
+= scsi_emulate_block_limits(outbuf
+ buflen
, &bl
);
739 case 0xb1: /* block device characteristics */
742 outbuf
[4] = (s
->rotation_rate
>> 8) & 0xff;
743 outbuf
[5] = s
->rotation_rate
& 0xff;
744 outbuf
[6] = 0; /* PRODUCT TYPE */
745 outbuf
[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
746 outbuf
[8] = 0; /* VBULS */
749 case 0xb2: /* thin provisioning */
753 outbuf
[5] = 0xe0; /* unmap & write_same 10/16 all supported */
754 outbuf
[6] = s
->qdev
.conf
.discard_granularity
? 2 : 1;
762 assert(buflen
- start
<= 255);
763 outbuf
[start
- 1] = buflen
- start
;
767 static int scsi_disk_emulate_inquiry(SCSIRequest
*req
, uint8_t *outbuf
)
769 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
772 if (req
->cmd
.buf
[1] & 0x1) {
773 /* Vital product data */
774 return scsi_disk_emulate_vpd_page(req
, outbuf
);
777 /* Standard INQUIRY data */
778 if (req
->cmd
.buf
[2] != 0) {
783 buflen
= req
->cmd
.xfer
;
784 if (buflen
> SCSI_MAX_INQUIRY_LEN
) {
785 buflen
= SCSI_MAX_INQUIRY_LEN
;
788 outbuf
[0] = s
->qdev
.type
& 0x1f;
789 outbuf
[1] = (s
->features
& (1 << SCSI_DISK_F_REMOVABLE
)) ? 0x80 : 0;
791 strpadcpy((char *) &outbuf
[16], 16, s
->product
, ' ');
792 strpadcpy((char *) &outbuf
[8], 8, s
->vendor
, ' ');
794 memset(&outbuf
[32], 0, 4);
795 memcpy(&outbuf
[32], s
->version
, MIN(4, strlen(s
->version
)));
797 * We claim conformance to SPC-3, which is required for guests
798 * to ask for modern features like READ CAPACITY(16) or the
799 * block characteristics VPD page by default. Not all of SPC-3
800 * is actually implemented, but we're good enough.
802 outbuf
[2] = s
->qdev
.default_scsi_version
;
803 outbuf
[3] = 2 | 0x10; /* Format 2, HiSup */
806 outbuf
[4] = buflen
- 5; /* Additional Length = (Len - 1) - 4 */
808 /* If the allocation length of CDB is too small,
809 the additional length is not adjusted */
813 /* Sync data transfer and TCQ. */
814 outbuf
[7] = 0x10 | (req
->bus
->info
->tcq
? 0x02 : 0);
818 static inline bool media_is_dvd(SCSIDiskState
*s
)
821 if (s
->qdev
.type
!= TYPE_ROM
) {
824 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
827 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
828 return nb_sectors
> CD_MAX_SECTORS
;
831 static inline bool media_is_cd(SCSIDiskState
*s
)
834 if (s
->qdev
.type
!= TYPE_ROM
) {
837 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
840 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
841 return nb_sectors
<= CD_MAX_SECTORS
;
844 static int scsi_read_disc_information(SCSIDiskState
*s
, SCSIDiskReq
*r
,
847 uint8_t type
= r
->req
.cmd
.buf
[1] & 7;
849 if (s
->qdev
.type
!= TYPE_ROM
) {
853 /* Types 1/2 are only defined for Blu-Ray. */
855 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
859 memset(outbuf
, 0, 34);
861 outbuf
[2] = 0xe; /* last session complete, disc finalized */
862 outbuf
[3] = 1; /* first track on disc */
863 outbuf
[4] = 1; /* # of sessions */
864 outbuf
[5] = 1; /* first track of last session */
865 outbuf
[6] = 1; /* last track of last session */
866 outbuf
[7] = 0x20; /* unrestricted use */
867 outbuf
[8] = 0x00; /* CD-ROM or DVD-ROM */
868 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
869 /* 12-23: not meaningful for CD-ROM or DVD-ROM */
870 /* 24-31: disc bar code */
871 /* 32: disc application code */
872 /* 33: number of OPC tables */
877 static int scsi_read_dvd_structure(SCSIDiskState
*s
, SCSIDiskReq
*r
,
880 static const int rds_caps_size
[5] = {
887 uint8_t media
= r
->req
.cmd
.buf
[1];
888 uint8_t layer
= r
->req
.cmd
.buf
[6];
889 uint8_t format
= r
->req
.cmd
.buf
[7];
892 if (s
->qdev
.type
!= TYPE_ROM
) {
896 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
900 if (format
!= 0xff) {
901 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
902 scsi_check_condition(r
, SENSE_CODE(NO_MEDIUM
));
905 if (media_is_cd(s
)) {
906 scsi_check_condition(r
, SENSE_CODE(INCOMPATIBLE_FORMAT
));
909 if (format
>= ARRAY_SIZE(rds_caps_size
)) {
912 size
= rds_caps_size
[format
];
913 memset(outbuf
, 0, size
);
918 /* Physical format information */
923 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
925 outbuf
[4] = 1; /* DVD-ROM, part version 1 */
926 outbuf
[5] = 0xf; /* 120mm disc, minimum rate unspecified */
927 outbuf
[6] = 1; /* one layer, read-only (per MMC-2 spec) */
928 outbuf
[7] = 0; /* default densities */
930 stl_be_p(&outbuf
[12], (nb_sectors
>> 2) - 1); /* end sector */
931 stl_be_p(&outbuf
[16], (nb_sectors
>> 2) - 1); /* l0 end sector */
935 case 0x01: /* DVD copyright information, all zeros */
938 case 0x03: /* BCA information - invalid field for no BCA info */
941 case 0x04: /* DVD disc manufacturing information, all zeros */
944 case 0xff: { /* List capabilities */
947 for (i
= 0; i
< ARRAY_SIZE(rds_caps_size
); i
++) {
948 if (!rds_caps_size
[i
]) {
952 outbuf
[size
+ 1] = 0x40; /* Not writable, readable */
953 stw_be_p(&outbuf
[size
+ 2], rds_caps_size
[i
]);
963 /* Size of buffer, not including 2 byte size field */
964 stw_be_p(outbuf
, size
- 2);
971 static int scsi_event_status_media(SCSIDiskState
*s
, uint8_t *outbuf
)
973 uint8_t event_code
, media_status
;
977 media_status
= MS_TRAY_OPEN
;
978 } else if (blk_is_inserted(s
->qdev
.conf
.blk
)) {
979 media_status
= MS_MEDIA_PRESENT
;
982 /* Event notification descriptor */
983 event_code
= MEC_NO_CHANGE
;
984 if (media_status
!= MS_TRAY_OPEN
) {
985 if (s
->media_event
) {
986 event_code
= MEC_NEW_MEDIA
;
987 s
->media_event
= false;
988 } else if (s
->eject_request
) {
989 event_code
= MEC_EJECT_REQUESTED
;
990 s
->eject_request
= false;
994 outbuf
[0] = event_code
;
995 outbuf
[1] = media_status
;
997 /* These fields are reserved, just clear them. */
1003 static int scsi_get_event_status_notification(SCSIDiskState
*s
, SCSIDiskReq
*r
,
1007 uint8_t *buf
= r
->req
.cmd
.buf
;
1008 uint8_t notification_class_request
= buf
[4];
1009 if (s
->qdev
.type
!= TYPE_ROM
) {
1012 if ((buf
[1] & 1) == 0) {
1018 outbuf
[0] = outbuf
[1] = 0;
1019 outbuf
[3] = 1 << GESN_MEDIA
; /* supported events */
1020 if (notification_class_request
& (1 << GESN_MEDIA
)) {
1021 outbuf
[2] = GESN_MEDIA
;
1022 size
+= scsi_event_status_media(s
, &outbuf
[size
]);
1026 stw_be_p(outbuf
, size
- 4);
1030 static int scsi_get_configuration(SCSIDiskState
*s
, uint8_t *outbuf
)
1034 if (s
->qdev
.type
!= TYPE_ROM
) {
1038 if (media_is_dvd(s
)) {
1039 current
= MMC_PROFILE_DVD_ROM
;
1040 } else if (media_is_cd(s
)) {
1041 current
= MMC_PROFILE_CD_ROM
;
1043 current
= MMC_PROFILE_NONE
;
1046 memset(outbuf
, 0, 40);
1047 stl_be_p(&outbuf
[0], 36); /* Bytes after the data length field */
1048 stw_be_p(&outbuf
[6], current
);
1049 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1050 outbuf
[10] = 0x03; /* persistent, current */
1051 outbuf
[11] = 8; /* two profiles */
1052 stw_be_p(&outbuf
[12], MMC_PROFILE_DVD_ROM
);
1053 outbuf
[14] = (current
== MMC_PROFILE_DVD_ROM
);
1054 stw_be_p(&outbuf
[16], MMC_PROFILE_CD_ROM
);
1055 outbuf
[18] = (current
== MMC_PROFILE_CD_ROM
);
1056 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1057 stw_be_p(&outbuf
[20], 1);
1058 outbuf
[22] = 0x08 | 0x03; /* version 2, persistent, current */
1060 stl_be_p(&outbuf
[24], 1); /* SCSI */
1061 outbuf
[28] = 1; /* DBE = 1, mandatory */
1062 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1063 stw_be_p(&outbuf
[32], 3);
1064 outbuf
[34] = 0x08 | 0x03; /* version 2, persistent, current */
1066 outbuf
[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1067 /* TODO: Random readable, CD read, DVD read, drive serial number,
1072 static int scsi_emulate_mechanism_status(SCSIDiskState
*s
, uint8_t *outbuf
)
1074 if (s
->qdev
.type
!= TYPE_ROM
) {
1077 memset(outbuf
, 0, 8);
1078 outbuf
[5] = 1; /* CD-ROM */
1082 static int mode_sense_page(SCSIDiskState
*s
, int page
, uint8_t **p_outbuf
,
1085 static const int mode_sense_valid
[0x3f] = {
1086 [MODE_PAGE_VENDOR_SPECIFIC
] = (1 << TYPE_DISK
) | (1 << TYPE_ROM
),
1087 [MODE_PAGE_HD_GEOMETRY
] = (1 << TYPE_DISK
),
1088 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY
] = (1 << TYPE_DISK
),
1089 [MODE_PAGE_CACHING
] = (1 << TYPE_DISK
) | (1 << TYPE_ROM
),
1090 [MODE_PAGE_R_W_ERROR
] = (1 << TYPE_DISK
) | (1 << TYPE_ROM
),
1091 [MODE_PAGE_AUDIO_CTL
] = (1 << TYPE_ROM
),
1092 [MODE_PAGE_CAPABILITIES
] = (1 << TYPE_ROM
),
1093 [MODE_PAGE_APPLE_VENDOR
] = (1 << TYPE_ROM
),
1096 uint8_t *p
= *p_outbuf
+ 2;
1099 assert(page
< ARRAY_SIZE(mode_sense_valid
));
1100 if ((mode_sense_valid
[page
] & (1 << s
->qdev
.type
)) == 0) {
    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header. This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
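
    /*
     * For example, in the HD geometry page below, the cylinder count that the
     * spec describes at bytes 2-4 of the page is written to p[0]-p[2].
     */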
1117 case MODE_PAGE_HD_GEOMETRY
:
1119 if (page_control
== 1) { /* Changeable Values */
1122 /* if a geometry hint is available, use it */
1123 p
[0] = (s
->qdev
.conf
.cyls
>> 16) & 0xff;
1124 p
[1] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1125 p
[2] = s
->qdev
.conf
.cyls
& 0xff;
1126 p
[3] = s
->qdev
.conf
.heads
& 0xff;
1127 /* Write precomp start cylinder, disabled */
1128 p
[4] = (s
->qdev
.conf
.cyls
>> 16) & 0xff;
1129 p
[5] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1130 p
[6] = s
->qdev
.conf
.cyls
& 0xff;
1131 /* Reduced current start cylinder, disabled */
1132 p
[7] = (s
->qdev
.conf
.cyls
>> 16) & 0xff;
1133 p
[8] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1134 p
[9] = s
->qdev
.conf
.cyls
& 0xff;
1135 /* Device step rate [ns], 200ns */
1138 /* Landing zone cylinder */
1142 /* Medium rotation rate [rpm], 5400 rpm */
1143 p
[18] = (5400 >> 8) & 0xff;
1144 p
[19] = 5400 & 0xff;
1147 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY
:
1149 if (page_control
== 1) { /* Changeable Values */
1152 /* Transfer rate [kbit/s], 5Mbit/s */
1155 /* if a geometry hint is available, use it */
1156 p
[2] = s
->qdev
.conf
.heads
& 0xff;
1157 p
[3] = s
->qdev
.conf
.secs
& 0xff;
1158 p
[4] = s
->qdev
.blocksize
>> 8;
1159 p
[6] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1160 p
[7] = s
->qdev
.conf
.cyls
& 0xff;
1161 /* Write precomp start cylinder, disabled */
1162 p
[8] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1163 p
[9] = s
->qdev
.conf
.cyls
& 0xff;
1164 /* Reduced current start cylinder, disabled */
1165 p
[10] = (s
->qdev
.conf
.cyls
>> 8) & 0xff;
1166 p
[11] = s
->qdev
.conf
.cyls
& 0xff;
1167 /* Device step rate [100us], 100us */
1170 /* Device step pulse width [us], 1us */
1172 /* Device head settle delay [100us], 100us */
1175 /* Motor on delay [0.1s], 0.1s */
1177 /* Motor off delay [0.1s], 0.1s */
1179 /* Medium rotation rate [rpm], 5400 rpm */
1180 p
[26] = (5400 >> 8) & 0xff;
1181 p
[27] = 5400 & 0xff;
1184 case MODE_PAGE_CACHING
:
1186 if (page_control
== 1 || /* Changeable Values */
1187 blk_enable_write_cache(s
->qdev
.conf
.blk
)) {
1192 case MODE_PAGE_R_W_ERROR
:
1194 if (page_control
== 1) { /* Changeable Values */
1195 if (s
->qdev
.type
== TYPE_ROM
) {
1196 /* Automatic Write Reallocation Enabled */
1201 p
[0] = 0x80; /* Automatic Write Reallocation Enabled */
1202 if (s
->qdev
.type
== TYPE_ROM
) {
1203 p
[1] = 0x20; /* Read Retry Count */
1207 case MODE_PAGE_AUDIO_CTL
:
1211 case MODE_PAGE_CAPABILITIES
:
1213 if (page_control
== 1) { /* Changeable Values */
1217 p
[0] = 0x3b; /* CD-R & CD-RW read */
1218 p
[1] = 0; /* Writing not supported */
1219 p
[2] = 0x7f; /* Audio, composite, digital out,
1220 mode 2 form 1&2, multi session */
1221 p
[3] = 0xff; /* CD DA, DA accurate, RW supported,
1222 RW corrected, C2 errors, ISRC,
1224 p
[4] = 0x2d | (s
->tray_locked
? 2 : 0);
1225 /* Locking supported, jumper present, eject, tray */
1226 p
[5] = 0; /* no volume & mute control, no
1228 p
[6] = (50 * 176) >> 8; /* 50x read speed */
1229 p
[7] = (50 * 176) & 0xff;
1230 p
[8] = 2 >> 8; /* Two volume levels */
1232 p
[10] = 2048 >> 8; /* 2M buffer */
1233 p
[11] = 2048 & 0xff;
1234 p
[12] = (16 * 176) >> 8; /* 16x read speed current */
1235 p
[13] = (16 * 176) & 0xff;
1236 p
[16] = (16 * 176) >> 8; /* 16x write speed */
1237 p
[17] = (16 * 176) & 0xff;
1238 p
[18] = (16 * 176) >> 8; /* 16x write speed current */
1239 p
[19] = (16 * 176) & 0xff;
1242 case MODE_PAGE_APPLE_VENDOR
:
1243 if (s
->quirks
& (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR
)) {
1245 if (page_control
== 1) { /* Changeable Values */
1249 memset(p
, 0, length
);
1250 strcpy((char *)p
+ 8, "APPLE COMPUTER, INC ");
1256 case MODE_PAGE_VENDOR_SPECIFIC
:
1257 if (s
->qdev
.type
== TYPE_DISK
&& (s
->quirks
&
1258 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE
))) {
1260 if (page_control
== 1) { /* Changeable Values */
1276 assert(length
< 256);
1277 (*p_outbuf
)[0] = page
;
1278 (*p_outbuf
)[1] = length
;
1279 *p_outbuf
+= length
+ 2;
1283 static int scsi_disk_emulate_mode_sense(SCSIDiskReq
*r
, uint8_t *outbuf
)
1285 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1286 uint64_t nb_sectors
;
1288 int page
, buflen
, ret
, page_control
;
1290 uint8_t dev_specific_param
;
1292 dbd
= (r
->req
.cmd
.buf
[1] & 0x8) != 0;
1293 page
= r
->req
.cmd
.buf
[2] & 0x3f;
1294 page_control
= (r
->req
.cmd
.buf
[2] & 0xc0) >> 6;
1296 trace_scsi_disk_emulate_mode_sense((r
->req
.cmd
.buf
[0] == MODE_SENSE
) ? 6 :
1297 10, page
, r
->req
.cmd
.xfer
, page_control
);
1298 memset(outbuf
, 0, r
->req
.cmd
.xfer
);
1301 if (s
->qdev
.type
== TYPE_DISK
) {
1302 dev_specific_param
= s
->features
& (1 << SCSI_DISK_F_DPOFUA
) ? 0x10 : 0;
1303 if (!blk_is_writable(s
->qdev
.conf
.blk
)) {
1304 dev_specific_param
|= 0x80; /* Readonly. */
1307 if (s
->quirks
& (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD
)) {
1308 /* Use DBD from the request... */
1309 dev_specific_param
= 0x00;
1312 * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
1313 * which should never return a block descriptor even though DBD is
1314 * not set, otherwise CDROM detection fails in MacOS
1316 if (s
->quirks
& (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR
) &&
1317 page
== MODE_PAGE_APPLE_VENDOR
) {
1322 * MMC prescribes that CD/DVD drives have no block descriptors,
1323 * and defines no device-specific parameter.
1325 dev_specific_param
= 0x00;
1330 if (r
->req
.cmd
.buf
[0] == MODE_SENSE
) {
1331 p
[1] = 0; /* Default media type. */
1332 p
[2] = dev_specific_param
;
1333 p
[3] = 0; /* Block descriptor length. */
1335 } else { /* MODE_SENSE_10 */
1336 p
[2] = 0; /* Default media type. */
1337 p
[3] = dev_specific_param
;
1338 p
[6] = p
[7] = 0; /* Block descriptor length. */
1342 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
1343 if (!dbd
&& nb_sectors
) {
1344 if (r
->req
.cmd
.buf
[0] == MODE_SENSE
) {
1345 outbuf
[3] = 8; /* Block descriptor length */
1346 } else { /* MODE_SENSE_10 */
1347 outbuf
[7] = 8; /* Block descriptor length */
1349 nb_sectors
/= (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1350 if (nb_sectors
> 0xffffff) {
1353 p
[0] = 0; /* media density code */
1354 p
[1] = (nb_sectors
>> 16) & 0xff;
1355 p
[2] = (nb_sectors
>> 8) & 0xff;
1356 p
[3] = nb_sectors
& 0xff;
1357 p
[4] = 0; /* reserved */
1358 p
[5] = 0; /* bytes 5-7 are the sector size in bytes */
1359 p
[6] = s
->qdev
.blocksize
>> 8;
1364 if (page_control
== 3) {
1366 scsi_check_condition(r
, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED
));
1371 for (page
= 0; page
<= 0x3e; page
++) {
1372 mode_sense_page(s
, page
, &p
, page_control
);
1375 ret
= mode_sense_page(s
, page
, &p
, page_control
);
1381 buflen
= p
- outbuf
;
1383 * The mode data length field specifies the length in bytes of the
1384 * following data that is available to be transferred. The mode data
1385 * length does not include itself.
1387 if (r
->req
.cmd
.buf
[0] == MODE_SENSE
) {
1388 outbuf
[0] = buflen
- 1;
1389 } else { /* MODE_SENSE_10 */
1390 outbuf
[0] = ((buflen
- 2) >> 8) & 0xff;
1391 outbuf
[1] = (buflen
- 2) & 0xff;
1396 static int scsi_disk_emulate_read_toc(SCSIRequest
*req
, uint8_t *outbuf
)
1398 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1399 int start_track
, format
, msf
, toclen
;
1400 uint64_t nb_sectors
;
1402 msf
= req
->cmd
.buf
[1] & 2;
1403 format
= req
->cmd
.buf
[2] & 0xf;
1404 start_track
= req
->cmd
.buf
[6];
1405 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
1406 trace_scsi_disk_emulate_read_toc(start_track
, format
, msf
>> 1);
1407 nb_sectors
/= s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
;
1410 toclen
= cdrom_read_toc(nb_sectors
, outbuf
, msf
, start_track
);
1413 /* multi session : only a single session defined */
1415 memset(outbuf
, 0, 12);
1421 toclen
= cdrom_read_toc_raw(nb_sectors
, outbuf
, msf
, start_track
);
1429 static int scsi_disk_emulate_start_stop(SCSIDiskReq
*r
)
1431 SCSIRequest
*req
= &r
->req
;
1432 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1433 bool start
= req
->cmd
.buf
[4] & 1;
1434 bool loej
= req
->cmd
.buf
[4] & 2; /* load on start, eject on !start */
1435 int pwrcnd
= req
->cmd
.buf
[4] & 0xf0;
1438 /* eject/load only happens for power condition == 0 */
1442 if ((s
->features
& (1 << SCSI_DISK_F_REMOVABLE
)) && loej
) {
1443 if (!start
&& !s
->tray_open
&& s
->tray_locked
) {
1444 scsi_check_condition(r
,
1445 blk_is_inserted(s
->qdev
.conf
.blk
)
1446 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED
)
1447 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED
));
1451 if (s
->tray_open
!= !start
) {
1452 blk_eject(s
->qdev
.conf
.blk
, !start
);
1453 s
->tray_open
= !start
;
1459 static void scsi_disk_emulate_read_data(SCSIRequest
*req
)
1461 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
1462 int buflen
= r
->iov
.iov_len
;
1465 trace_scsi_disk_emulate_read_data(buflen
);
1468 scsi_req_data(&r
->req
, buflen
);
1472 /* This also clears the sense buffer for REQUEST SENSE. */
1473 scsi_req_complete(&r
->req
, GOOD
);
1476 static int scsi_disk_check_mode_select(SCSIDiskState
*s
, int page
,
1477 uint8_t *inbuf
, int inlen
)
1479 uint8_t mode_current
[SCSI_MAX_MODE_LEN
];
1480 uint8_t mode_changeable
[SCSI_MAX_MODE_LEN
];
1482 int len
, expected_len
, changeable_len
, i
;
1484 /* The input buffer does not include the page header, so it is
1487 expected_len
= inlen
+ 2;
1488 if (expected_len
> SCSI_MAX_MODE_LEN
) {
1492 /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
1493 if (page
== MODE_PAGE_ALLS
) {
1498 memset(mode_current
, 0, inlen
+ 2);
1499 len
= mode_sense_page(s
, page
, &p
, 0);
1500 if (len
< 0 || len
!= expected_len
) {
1504 p
= mode_changeable
;
1505 memset(mode_changeable
, 0, inlen
+ 2);
1506 changeable_len
= mode_sense_page(s
, page
, &p
, 1);
1507 assert(changeable_len
== len
);
1509 /* Check that unchangeable bits are the same as what MODE SENSE
1512 for (i
= 2; i
< len
; i
++) {
1513 if (((mode_current
[i
] ^ inbuf
[i
- 2]) & ~mode_changeable
[i
]) != 0) {
1520 static void scsi_disk_apply_mode_select(SCSIDiskState
*s
, int page
, uint8_t *p
)
1523 case MODE_PAGE_CACHING
:
1524 blk_set_enable_write_cache(s
->qdev
.conf
.blk
, (p
[0] & 4) != 0);
1532 static int mode_select_pages(SCSIDiskReq
*r
, uint8_t *p
, int len
, bool change
)
1534 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1537 int page
, subpage
, page_len
;
1539 /* Parse both possible formats for the mode page headers. */
1543 goto invalid_param_len
;
1546 page_len
= lduw_be_p(&p
[2]);
1551 goto invalid_param_len
;
1562 if (page_len
> len
) {
1563 if (!(s
->quirks
& SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED
)) {
1564 goto invalid_param_len
;
1566 trace_scsi_disk_mode_select_page_truncated(page
, page_len
, len
);
1570 if (scsi_disk_check_mode_select(s
, page
, p
, page_len
) < 0) {
1574 scsi_disk_apply_mode_select(s
, page
, p
);
1583 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM
));
1587 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM_LEN
));
1591 static void scsi_disk_emulate_mode_select(SCSIDiskReq
*r
, uint8_t *inbuf
)
1593 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1595 int cmd
= r
->req
.cmd
.buf
[0];
1596 int len
= r
->req
.cmd
.xfer
;
1597 int hdr_len
= (cmd
== MODE_SELECT
? 4 : 8);
1601 if ((r
->req
.cmd
.buf
[1] & 0x11) != 0x10) {
1603 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE
))) {
1604 /* We only support PF=1, SP=0. */
1609 if (len
< hdr_len
) {
1610 goto invalid_param_len
;
1613 bd_len
= (cmd
== MODE_SELECT
? p
[3] : lduw_be_p(&p
[6]));
1617 goto invalid_param_len
;
1619 if (bd_len
!= 0 && bd_len
!= 8) {
1623 /* Allow changing the block size */
1625 bs
= p
[5] << 16 | p
[6] << 8 | p
[7];
1628 * Since the existing code only checks/updates bits 8-15 of the block
1629 * size, restrict ourselves to the same requirement for now to ensure
1630 * that a block size set by a block descriptor and then read back by
1631 * a subsequent SCSI command will be the same
1633 if (bs
&& !(bs
& ~0xff00) && bs
!= s
->qdev
.blocksize
) {
1634 s
->qdev
.blocksize
= bs
;
1635 trace_scsi_disk_mode_select_set_blocksize(s
->qdev
.blocksize
);
1642 /* Ensure no change is made if there is an error! */
1643 for (pass
= 0; pass
< 2; pass
++) {
1644 if (mode_select_pages(r
, p
, len
, pass
== 1) < 0) {
1649 if (!blk_enable_write_cache(s
->qdev
.conf
.blk
)) {
1650 /* The request is used as the AIO opaque value, so add a ref. */
1651 scsi_req_ref(&r
->req
);
1652 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
, 0,
1654 r
->req
.aiocb
= blk_aio_flush(s
->qdev
.conf
.blk
, scsi_aio_complete
, r
);
1658 scsi_req_complete(&r
->req
, GOOD
);
1662 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM
));
1666 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM_LEN
));
1670 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));

/* sector_num and nb_sectors expected to be in qdev blocksize */
static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}
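
/*
 * Worked example (illustrative values): with max_lba == 999, sector_num == 990
 * and nb_sectors == 10 pass (the last sector touched is 999), nb_sectors == 11
 * fails the second comparison, and a wrapping request such as sector_num ==
 * UINT64_MAX with nb_sectors == 1 overflows and is caught by the first one.
 */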
1690 typedef struct UnmapCBData
{
1696 static void scsi_unmap_complete(void *opaque
, int ret
);
1698 static void scsi_unmap_complete_noio(UnmapCBData
*data
, int ret
)
1700 SCSIDiskReq
*r
= data
->r
;
1701 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1703 assert(r
->req
.aiocb
== NULL
);
1705 if (data
->count
> 0) {
1706 uint64_t sector_num
= ldq_be_p(&data
->inbuf
[0]);
1707 uint32_t nb_sectors
= ldl_be_p(&data
->inbuf
[8]) & 0xffffffffULL
;
1708 r
->sector
= sector_num
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1709 r
->sector_count
= nb_sectors
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1711 if (!check_lba_range(s
, sector_num
, nb_sectors
)) {
1712 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
),
1714 scsi_check_condition(r
, SENSE_CODE(LBA_OUT_OF_RANGE
));
1718 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1719 r
->sector_count
* BDRV_SECTOR_SIZE
,
1722 r
->req
.aiocb
= blk_aio_pdiscard(s
->qdev
.conf
.blk
,
1723 r
->sector
* BDRV_SECTOR_SIZE
,
1724 r
->sector_count
* BDRV_SECTOR_SIZE
,
1725 scsi_unmap_complete
, data
);
1731 scsi_req_complete(&r
->req
, GOOD
);
1734 scsi_req_unref(&r
->req
);
1738 static void scsi_unmap_complete(void *opaque
, int ret
)
1740 UnmapCBData
*data
= opaque
;
1741 SCSIDiskReq
*r
= data
->r
;
1742 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1744 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
1746 assert(r
->req
.aiocb
!= NULL
);
1747 r
->req
.aiocb
= NULL
;
1749 if (scsi_disk_req_check_error(r
, ret
, true)) {
1750 scsi_req_unref(&r
->req
);
1753 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
1754 scsi_unmap_complete_noio(data
, ret
);
1756 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
1759 static void scsi_disk_emulate_unmap(SCSIDiskReq
*r
, uint8_t *inbuf
)
1761 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1763 int len
= r
->req
.cmd
.xfer
;
1766 /* Reject ANCHOR=1. */
1767 if (r
->req
.cmd
.buf
[1] & 0x1) {
1772 goto invalid_param_len
;
1774 if (len
< lduw_be_p(&p
[0]) + 2) {
1775 goto invalid_param_len
;
1777 if (len
< lduw_be_p(&p
[2]) + 8) {
1778 goto invalid_param_len
;
1780 if (lduw_be_p(&p
[2]) & 15) {
1781 goto invalid_param_len
;
1784 if (!blk_is_writable(s
->qdev
.conf
.blk
)) {
1785 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
), BLOCK_ACCT_UNMAP
);
1786 scsi_check_condition(r
, SENSE_CODE(WRITE_PROTECTED
));
1790 data
= g_new0(UnmapCBData
, 1);
1792 data
->inbuf
= &p
[8];
1793 data
->count
= lduw_be_p(&p
[2]) >> 4;
1795 /* The matching unref is in scsi_unmap_complete, before data is freed. */
1796 scsi_req_ref(&r
->req
);
1797 scsi_unmap_complete_noio(data
, 0);
1801 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
), BLOCK_ACCT_UNMAP
);
1802 scsi_check_condition(r
, SENSE_CODE(INVALID_PARAM_LEN
));
1806 block_acct_invalid(blk_get_stats(s
->qdev
.conf
.blk
), BLOCK_ACCT_UNMAP
);
1807 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
1810 typedef struct WriteSameCBData
{
1818 static void scsi_write_same_complete(void *opaque
, int ret
)
1820 WriteSameCBData
*data
= opaque
;
1821 SCSIDiskReq
*r
= data
->r
;
1822 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, r
->req
.dev
);
1824 aio_context_acquire(blk_get_aio_context(s
->qdev
.conf
.blk
));
1826 assert(r
->req
.aiocb
!= NULL
);
1827 r
->req
.aiocb
= NULL
;
1829 if (scsi_disk_req_check_error(r
, ret
, true)) {
1833 block_acct_done(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
);
1835 data
->nb_sectors
-= data
->iov
.iov_len
/ BDRV_SECTOR_SIZE
;
1836 data
->sector
+= data
->iov
.iov_len
/ BDRV_SECTOR_SIZE
;
1837 data
->iov
.iov_len
= MIN(data
->nb_sectors
* BDRV_SECTOR_SIZE
,
1839 if (data
->iov
.iov_len
) {
1840 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1841 data
->iov
.iov_len
, BLOCK_ACCT_WRITE
);
1842 /* Reinitialize qiov, to handle unaligned WRITE SAME request
1843 * where final qiov may need smaller size */
1844 qemu_iovec_init_external(&data
->qiov
, &data
->iov
, 1);
1845 r
->req
.aiocb
= blk_aio_pwritev(s
->qdev
.conf
.blk
,
1846 data
->sector
<< BDRV_SECTOR_BITS
,
1848 scsi_write_same_complete
, data
);
1849 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
1853 scsi_req_complete(&r
->req
, GOOD
);
1856 scsi_req_unref(&r
->req
);
1857 qemu_vfree(data
->iov
.iov_base
);
1859 aio_context_release(blk_get_aio_context(s
->qdev
.conf
.blk
));
1862 static void scsi_disk_emulate_write_same(SCSIDiskReq
*r
, uint8_t *inbuf
)
1864 SCSIRequest
*req
= &r
->req
;
1865 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1866 uint32_t nb_sectors
= scsi_data_cdb_xfer(r
->req
.cmd
.buf
);
1867 WriteSameCBData
*data
;
1871 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
1872 if (nb_sectors
== 0 || (req
->cmd
.buf
[1] & 0x16)) {
1873 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
1877 if (!blk_is_writable(s
->qdev
.conf
.blk
)) {
1878 scsi_check_condition(r
, SENSE_CODE(WRITE_PROTECTED
));
1881 if (!check_lba_range(s
, r
->req
.cmd
.lba
, nb_sectors
)) {
1882 scsi_check_condition(r
, SENSE_CODE(LBA_OUT_OF_RANGE
));
1886 if ((req
->cmd
.buf
[1] & 0x1) || buffer_is_zero(inbuf
, s
->qdev
.blocksize
)) {
1887 int flags
= (req
->cmd
.buf
[1] & 0x8) ? BDRV_REQ_MAY_UNMAP
: 0;
1889 /* The request is used as the AIO opaque value, so add a ref. */
1890 scsi_req_ref(&r
->req
);
1891 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1892 nb_sectors
* s
->qdev
.blocksize
,
1894 r
->req
.aiocb
= blk_aio_pwrite_zeroes(s
->qdev
.conf
.blk
,
1895 r
->req
.cmd
.lba
* s
->qdev
.blocksize
,
1896 nb_sectors
* s
->qdev
.blocksize
,
1897 flags
, scsi_aio_complete
, r
);
1901 data
= g_new0(WriteSameCBData
, 1);
1903 data
->sector
= r
->req
.cmd
.lba
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1904 data
->nb_sectors
= nb_sectors
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
1905 data
->iov
.iov_len
= MIN(data
->nb_sectors
* BDRV_SECTOR_SIZE
,
1906 SCSI_WRITE_SAME_MAX
);
1907 data
->iov
.iov_base
= buf
= blk_blockalign(s
->qdev
.conf
.blk
,
1909 qemu_iovec_init_external(&data
->qiov
, &data
->iov
, 1);
1911 for (i
= 0; i
< data
->iov
.iov_len
; i
+= l
) {
1912 l
= MIN(s
->qdev
.blocksize
, data
->iov
.iov_len
- i
);
1913 memcpy(&buf
[i
], inbuf
, l
);
1916 scsi_req_ref(&r
->req
);
1917 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
,
1918 data
->iov
.iov_len
, BLOCK_ACCT_WRITE
);
1919 r
->req
.aiocb
= blk_aio_pwritev(s
->qdev
.conf
.blk
,
1920 data
->sector
<< BDRV_SECTOR_BITS
,
1922 scsi_write_same_complete
, data
);
1925 static void scsi_disk_emulate_write_data(SCSIRequest
*req
)
1927 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
1929 if (r
->iov
.iov_len
) {
1930 int buflen
= r
->iov
.iov_len
;
1931 trace_scsi_disk_emulate_write_data(buflen
);
1933 scsi_req_data(&r
->req
, buflen
);
1937 switch (req
->cmd
.buf
[0]) {
1939 case MODE_SELECT_10
:
1940 /* This also clears the sense buffer for REQUEST SENSE. */
1941 scsi_disk_emulate_mode_select(r
, r
->iov
.iov_base
);
1945 scsi_disk_emulate_unmap(r
, r
->iov
.iov_base
);
1951 if (r
->req
.status
== -1) {
1952 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
1958 scsi_disk_emulate_write_same(r
, r
->iov
.iov_base
);
1966 static int32_t scsi_disk_emulate_command(SCSIRequest
*req
, uint8_t *buf
)
1968 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
1969 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
1970 uint64_t nb_sectors
;
1974 switch (req
->cmd
.buf
[0]) {
1983 case ALLOW_MEDIUM_REMOVAL
:
1984 case GET_CONFIGURATION
:
1985 case GET_EVENT_STATUS_NOTIFICATION
:
1986 case MECHANISM_STATUS
:
1991 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
1992 scsi_check_condition(r
, SENSE_CODE(NO_MEDIUM
));
1999 * FIXME: we shouldn't return anything bigger than 4k, but the code
2000 * requires the buffer to be as big as req->cmd.xfer in several
2001 * places. So, do not allow CDBs with a very large ALLOCATION
2002 * LENGTH. The real fix would be to modify scsi_read_data and
2003 * dma_buf_read, so that they return data beyond the buflen
2006 if (req
->cmd
.xfer
> 65536) {
2007 goto illegal_request
;
2009 r
->buflen
= MAX(4096, req
->cmd
.xfer
);
2011 if (!r
->iov
.iov_base
) {
2012 r
->iov
.iov_base
= blk_blockalign(s
->qdev
.conf
.blk
, r
->buflen
);
2015 outbuf
= r
->iov
.iov_base
;
2016 memset(outbuf
, 0, r
->buflen
);
2017 switch (req
->cmd
.buf
[0]) {
2018 case TEST_UNIT_READY
:
2019 assert(blk_is_available(s
->qdev
.conf
.blk
));
2022 buflen
= scsi_disk_emulate_inquiry(req
, outbuf
);
2024 goto illegal_request
;
2029 buflen
= scsi_disk_emulate_mode_sense(r
, outbuf
);
2031 goto illegal_request
;
2035 buflen
= scsi_disk_emulate_read_toc(req
, outbuf
);
2037 goto illegal_request
;
2041 if (req
->cmd
.buf
[1] & 1) {
2042 goto illegal_request
;
2046 if (req
->cmd
.buf
[1] & 3) {
2047 goto illegal_request
;
2051 if (req
->cmd
.buf
[1] & 1) {
2052 goto illegal_request
;
2056 if (req
->cmd
.buf
[1] & 3) {
2057 goto illegal_request
;
2061 if (scsi_disk_emulate_start_stop(r
) < 0) {
2065 case ALLOW_MEDIUM_REMOVAL
:
2066 s
->tray_locked
= req
->cmd
.buf
[4] & 1;
2067 blk_lock_medium(s
->qdev
.conf
.blk
, req
->cmd
.buf
[4] & 1);
2069 case READ_CAPACITY_10
:
2070 /* The normal LEN field for this command is zero. */
2071 memset(outbuf
, 0, 8);
2072 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
2074 scsi_check_condition(r
, SENSE_CODE(LUN_NOT_READY
));
2077 if ((req
->cmd
.buf
[8] & 1) == 0 && req
->cmd
.lba
) {
2078 goto illegal_request
;
2080 nb_sectors
/= s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
;
2081 /* Returned value is the address of the last sector. */
2083 /* Remember the new size for read/write sanity checking. */
2084 s
->qdev
.max_lba
= nb_sectors
;
2085 /* Clip to 2TB, instead of returning capacity modulo 2TB. */
2086 if (nb_sectors
> UINT32_MAX
) {
2087 nb_sectors
= UINT32_MAX
;
2089 outbuf
[0] = (nb_sectors
>> 24) & 0xff;
2090 outbuf
[1] = (nb_sectors
>> 16) & 0xff;
2091 outbuf
[2] = (nb_sectors
>> 8) & 0xff;
2092 outbuf
[3] = nb_sectors
& 0xff;
2095 outbuf
[6] = s
->qdev
.blocksize
>> 8;
2099 /* Just return "NO SENSE". */
2100 buflen
= scsi_convert_sense(NULL
, 0, outbuf
, r
->buflen
,
2101 (req
->cmd
.buf
[1] & 1) == 0);
2103 goto illegal_request
;
2106 case MECHANISM_STATUS
:
2107 buflen
= scsi_emulate_mechanism_status(s
, outbuf
);
2109 goto illegal_request
;
2112 case GET_CONFIGURATION
:
2113 buflen
= scsi_get_configuration(s
, outbuf
);
2115 goto illegal_request
;
2118 case GET_EVENT_STATUS_NOTIFICATION
:
2119 buflen
= scsi_get_event_status_notification(s
, r
, outbuf
);
2121 goto illegal_request
;
2124 case READ_DISC_INFORMATION
:
2125 buflen
= scsi_read_disc_information(s
, r
, outbuf
);
2127 goto illegal_request
;
2130 case READ_DVD_STRUCTURE
:
2131 buflen
= scsi_read_dvd_structure(s
, r
, outbuf
);
2133 goto illegal_request
;
2136 case SERVICE_ACTION_IN_16
:
2137 /* Service Action In subcommands. */
2138 if ((req
->cmd
.buf
[1] & 31) == SAI_READ_CAPACITY_16
) {
2139 trace_scsi_disk_emulate_command_SAI_16();
2140 memset(outbuf
, 0, req
->cmd
.xfer
);
2141 blk_get_geometry(s
->qdev
.conf
.blk
, &nb_sectors
);
2143 scsi_check_condition(r
, SENSE_CODE(LUN_NOT_READY
));
2146 if ((req
->cmd
.buf
[14] & 1) == 0 && req
->cmd
.lba
) {
2147 goto illegal_request
;
2149 nb_sectors
/= s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
;
2150 /* Returned value is the address of the last sector. */
2152 /* Remember the new size for read/write sanity checking. */
2153 s
->qdev
.max_lba
= nb_sectors
;
2154 outbuf
[0] = (nb_sectors
>> 56) & 0xff;
2155 outbuf
[1] = (nb_sectors
>> 48) & 0xff;
2156 outbuf
[2] = (nb_sectors
>> 40) & 0xff;
2157 outbuf
[3] = (nb_sectors
>> 32) & 0xff;
2158 outbuf
[4] = (nb_sectors
>> 24) & 0xff;
2159 outbuf
[5] = (nb_sectors
>> 16) & 0xff;
2160 outbuf
[6] = (nb_sectors
>> 8) & 0xff;
2161 outbuf
[7] = nb_sectors
& 0xff;
2164 outbuf
[10] = s
->qdev
.blocksize
>> 8;
2167 outbuf
[13] = get_physical_block_exp(&s
->qdev
.conf
);
2169 /* set TPE bit if the format supports discard */
2170 if (s
->qdev
.conf
.discard_granularity
) {
2174 /* Protection, exponent and lowest lba field left blank. */
2177 trace_scsi_disk_emulate_command_SAI_unsupported();
2178 goto illegal_request
;
2179 case SYNCHRONIZE_CACHE
:
2180 /* The request is used as the AIO opaque value, so add a ref. */
2181 scsi_req_ref(&r
->req
);
2182 block_acct_start(blk_get_stats(s
->qdev
.conf
.blk
), &r
->acct
, 0,
2184 r
->req
.aiocb
= blk_aio_flush(s
->qdev
.conf
.blk
, scsi_aio_complete
, r
);
2187 trace_scsi_disk_emulate_command_SEEK_10(r
->req
.cmd
.lba
);
2188 if (r
->req
.cmd
.lba
> s
->qdev
.max_lba
) {
2193 trace_scsi_disk_emulate_command_MODE_SELECT(r
->req
.cmd
.xfer
);
2195 case MODE_SELECT_10
:
2196 trace_scsi_disk_emulate_command_MODE_SELECT_10(r
->req
.cmd
.xfer
);
2199 trace_scsi_disk_emulate_command_UNMAP(r
->req
.cmd
.xfer
);
2204 trace_scsi_disk_emulate_command_VERIFY((req
->cmd
.buf
[1] >> 1) & 3);
2205 if (req
->cmd
.buf
[1] & 6) {
2206 goto illegal_request
;
2211 trace_scsi_disk_emulate_command_WRITE_SAME(
2212 req
->cmd
.buf
[0] == WRITE_SAME_10
? 10 : 16, r
->req
.cmd
.xfer
);
2215 trace_scsi_disk_emulate_command_FORMAT_UNIT(r
->req
.cmd
.xfer
);
2218 trace_scsi_disk_emulate_command_UNKNOWN(buf
[0],
2219 scsi_command_name(buf
[0]));
2220 scsi_check_condition(r
, SENSE_CODE(INVALID_OPCODE
));
2223 assert(!r
->req
.aiocb
);
2224 r
->iov
.iov_len
= MIN(r
->buflen
, req
->cmd
.xfer
);
2225 if (r
->iov
.iov_len
== 0) {
2226 scsi_req_complete(&r
->req
, GOOD
);
2228 if (r
->req
.cmd
.mode
== SCSI_XFER_TO_DEV
) {
2229 assert(r
->iov
.iov_len
== req
->cmd
.xfer
);
2230 return -r
->iov
.iov_len
;
2232 return r
->iov
.iov_len
;
2236 if (r
->req
.status
== -1) {
2237 scsi_check_condition(r
, SENSE_CODE(INVALID_FIELD
));
2242 scsi_check_condition(r
, SENSE_CODE(LBA_OUT_OF_RANGE
));

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */
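
/*
 * For example, on a 512-byte-block disk a READ(10) of 8 blocks returns 4096
 * and the matching WRITE(10) returns -4096, which is the
 * r->sector_count * BDRV_SECTOR_SIZE value computed at the end of
 * scsi_disk_dma_command() below.
 */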
2251 static int32_t scsi_disk_dma_command(SCSIRequest
*req
, uint8_t *buf
)
2253 SCSIDiskReq
*r
= DO_UPCAST(SCSIDiskReq
, req
, req
);
2254 SCSIDiskState
*s
= DO_UPCAST(SCSIDiskState
, qdev
, req
->dev
);
2255 SCSIDiskClass
*sdc
= (SCSIDiskClass
*) object_get_class(OBJECT(s
));
2261 if (!blk_is_available(s
->qdev
.conf
.blk
)) {
2262 scsi_check_condition(r
, SENSE_CODE(NO_MEDIUM
));
2266 len
= scsi_data_cdb_xfer(r
->req
.cmd
.buf
);
2272 trace_scsi_disk_dma_command_READ(r
->req
.cmd
.lba
, len
);
2273 /* Protection information is not supported. For SCSI versions 2 and
2274 * older (as determined by snooping the guest's INQUIRY commands),
2275 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2277 if (s
->qdev
.scsi_version
> 2 && (r
->req
.cmd
.buf
[1] & 0xe0)) {
2278 goto illegal_request
;
2280 if (!check_lba_range(s
, r
->req
.cmd
.lba
, len
)) {
2283 r
->sector
= r
->req
.cmd
.lba
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
2284 r
->sector_count
= len
* (s
->qdev
.blocksize
/ BDRV_SECTOR_SIZE
);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        trace_scsi_disk_dma_command_WRITE(
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;
    AioContext *ctx;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    aio_context_release(ctx);

    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}
static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}
static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}
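/*
 * The net effect is that a media change reaches the guest in two steps: the
 * call above queues a "no medium" unit attention for the ejected phase, and
 * scsi_disk_unit_attention_reported() below turns the recorded media_changed
 * flag into a MEDIUM_CHANGED unit attention once that first condition has
 * been reported.
 */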
static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}
static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb  = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open     = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb        = scsi_disk_resize_cb,
};

static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};
static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    bool read_only;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
        return;
    }

    if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
        !s->qdev.hba_supports_iothread)
    {
        error_setg(errp, "HBA does not support iothreads");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }

    read_only = !blk_supports_write_perm(s->qdev.conf.blk);
    if (dev->type == TYPE_ROM) {
        read_only = true;
    }

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }
    if (!s->device_id) {
        if (s->serial) {
            s->device_id = g_strdup_printf("%.20s", s->serial);
        } else {
            const char *str = blk_name(s->qdev.conf.blk);
            if (str && *str) {
                s->device_id = g_strdup(str);
            }
        }
    }
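    /* When no device_id property is given, it defaults to the first 20
     * characters of the serial property or, failing that, to the backend's
     * block device name as resolved above. */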
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }

    blk_iostatus_enable(s->qdev.conf.blk);

    add_boot_device_lchs(&dev->qdev, NULL,
                         dev->conf.lcyls,
                         dev->conf.lheads,
                         dev->conf.lsecs);
}
static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx = NULL;
    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        ctx = blk_get_aio_context(s->qdev.conf.blk);
        aio_context_acquire(ctx);
        if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
            goto out;
        }
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
out:
    if (ctx) {
        aio_context_release(ctx);
    }
}
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int ret;
    uint32_t blocksize = 2048;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    if (dev->conf.physical_block_size != 0) {
        blocksize = dev->conf.physical_block_size;
    }

    ctx = blk_get_aio_context(dev->conf.blk);
    aio_context_acquire(ctx);
    s->qdev.blocksize = blocksize;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
    aio_context_release(ctx);
}
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,
    [FORMAT_UNIT]                   = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};
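/*
 * Opcodes without an entry in the table above fall back to
 * scsi_disk_emulate_reqops in scsi_new_request() below, so unknown commands
 * still complete with the INVALID OPCODE check condition produced by the
 * emulation path instead of being silently dropped.
 */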
static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
{
    int i;
    int len = scsi_cdb_length(buf);
    char *line_buffer, *p;

    assert(len > 0 && len <= 16);
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", buf[i]);
    }
    trace_scsi_disk_new_request(lun, tag, line_buffer);

    g_free(line_buffer);
}
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

    if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
        scsi_disk_new_request_dump(lun, tag, buf);
    }

    return req;
}
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->qdev.io_timeout);
    if (ret < 0) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}
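/*
 * get_device_type() relies on the standard INQUIRY data layout: byte 0 holds
 * the peripheral device type and bit 7 of byte 1 is the RMB (removable
 * medium) flag, which is why those two bytes are decoded above.
 */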
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    AioContext *ctx;
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    ctx = blk_get_aio_context(s->qdev.conf.blk);
    aio_context_acquire(ctx);

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        goto out;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        goto out;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        goto out;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Make the scsi-block device not removable via the HMP and QMP eject
     * commands.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);

out:
    aio_context_release(ctx);
}
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB.  */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO.  */
    uint8_t cdb[16];
    BlockCompletionFunc *cb;
    void *cb_opaque;
} SCSIBlockReq;
static void scsi_block_sgio_complete(void *opaque, int ret)
{
    SCSIBlockReq *req = (SCSIBlockReq *)opaque;
    SCSIDiskReq *r = &req->req;
    SCSIDevice *s = r->req.dev;
    sg_io_hdr_t *io_hdr = &req->io_header;

    if (ret == 0) {
        if (io_hdr->host_status != SCSI_HOST_OK) {
            scsi_req_complete_failed(&r->req, io_hdr->host_status);
            scsi_req_unref(&r->req);
            return;
        }

        if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
            ret = BUSY;
        } else {
            ret = io_hdr->status;
        }

        if (ret > 0) {
            aio_context_acquire(blk_get_aio_context(s->conf.blk));
            if (scsi_handle_rw_error(r, ret, true)) {
                aio_context_release(blk_get_aio_context(s->conf.blk));
                scsi_req_unref(&r->req);
                return;
            }
            aio_context_release(blk_get_aio_context(s->conf.blk));

            /* Ignore error.  */
            ret = 0;
        }
    }

    req->cb(req->cb_opaque, ret);
}
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sector
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }
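    /* Worked example (illustrative values): a guest READ(6) (opcode 0x08,
     * group code 0) whose fragment starts beyond LBA 0x1ffff no longer fits
     * a 6-byte CDB, so the second branch above rebuilds it as READ(10):
     * (0x08 & 0x1f) | 0x20 == 0x28, with the LBA in bytes 2-5 and the block
     * count in bytes 7-8. */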
    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = s->qdev.io_timeout * 1000;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;
    req->cb = cb;
    req->cb_opaque = opaque;
    trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
                                     nb_logical_blocks, io_header->timeout);
    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header,
                          scsi_block_sgio_complete, req);
    assert(aiocb != NULL);
    return aiocb;
}
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}
static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}
static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}
static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 5:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 4:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }
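    /* The group code in bits 7-5 of the opcode selects the CDB layout decoded
     * above: group 0 is a 6-byte CDB with no flags or group number to
     * preserve, group 1 a 10-byte CDB (group number in byte 6), group 5 a
     * 12-byte CDB (byte 10) and group 4 a 16-byte CDB (byte 14).  The saved
     * bytes let scsi_block_do_sgio() rebuild an equivalent CDB for each DMA
     * fragment. */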
    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    return scsi_disk_dma_command(req, buf);
}
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}
static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, size_t buf_len,
                                void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len);
    }
}
static void scsi_block_update_sense(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
    r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
}
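/*
 * sb_len_wr is the number of sense bytes the kernel actually wrote for the
 * SG_IO request, so the request's sense length is clamped to what fits in
 * the preallocated sense buffer.
 */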
BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                           BlockCompletionFunc *cb, void *cb_opaque,
                           void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                            BlockCompletionFunc *cb, void *cb_opaque,
                            void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}
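/*
 * These two helpers are the default dma_readv/dma_writev hooks installed by
 * scsi_disk_base_class_initfn() below; scsi-block replaces them with the
 * SG_IO-based scsi_block_dma_readv/scsi_block_dma_writev in
 * scsi_block_class_initfn().
 */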
static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}
static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};
#define DEFINE_SCSI_DISK_PROPERTIES()                                   \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk),  \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),             \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),            \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),                  \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),                \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),                \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),              \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};
static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_hd_realize;
    sc->unrealize    = scsi_unrealize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    device_class_set_props(dc, scsi_hd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}
static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};
static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0),
    DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
    DEFINE_PROP_END_OF_LIST(),
};
static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_cd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    device_class_set_props(dc, scsi_cd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}
static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};
static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize      = scsi_block_realize;
    sc->alloc_req    = scsi_block_new_request;
    sc->parse_cdb    = scsi_block_parse_cdb;
    sdc->dma_readv   = scsi_block_dma_readv;
    sdc->dma_writev  = scsi_block_dma_writev;
    sdc->update_sense = scsi_block_update_sense;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    device_class_set_props(dc, scsi_block_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}
static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};
static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
}

type_init(scsi_disk_register_types)