sysemu: Split sysemu/runstate.h off sysemu/sysemu.h
qemu/armbru.git: hw/scsi/scsi-bus.c
1 #include "qemu/osdep.h"
2 #include "qapi/error.h"
3 #include "qemu/error-report.h"
4 #include "qemu/module.h"
5 #include "qemu/option.h"
6 #include "hw/qdev-properties.h"
7 #include "hw/scsi/scsi.h"
8 #include "migration/qemu-file-types.h"
9 #include "migration/vmstate.h"
10 #include "scsi/constants.h"
11 #include "sysemu/block-backend.h"
12 #include "sysemu/blockdev.h"
13 #include "sysemu/sysemu.h"
14 #include "sysemu/runstate.h"
15 #include "trace.h"
16 #include "sysemu/dma.h"
17 #include "qemu/cutils.h"
19 static char *scsibus_get_dev_path(DeviceState *dev);
20 static char *scsibus_get_fw_dev_path(DeviceState *dev);
21 static void scsi_req_dequeue(SCSIRequest *req);
22 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
23 static void scsi_target_free_buf(SCSIRequest *req);
25 static Property scsi_props[] = {
26 DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
27 DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
28 DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
29 DEFINE_PROP_END_OF_LIST(),
32 static void scsi_bus_class_init(ObjectClass *klass, void *data)
34 BusClass *k = BUS_CLASS(klass);
35 HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
37 k->get_dev_path = scsibus_get_dev_path;
38 k->get_fw_dev_path = scsibus_get_fw_dev_path;
39 hc->unplug = qdev_simple_device_unplug_cb;
42 static const TypeInfo scsi_bus_info = {
43 .name = TYPE_SCSI_BUS,
44 .parent = TYPE_BUS,
45 .instance_size = sizeof(SCSIBus),
46 .class_init = scsi_bus_class_init,
47 .interfaces = (InterfaceInfo[]) {
48 { TYPE_HOTPLUG_HANDLER },
49 { }
52 static int next_scsi_bus;
54 static void scsi_device_realize(SCSIDevice *s, Error **errp)
56 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
57 if (sc->realize) {
58 sc->realize(s, errp);
62 int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
63 void *hba_private)
65 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
66 int rc;
68 assert(cmd->len == 0);
69 rc = scsi_req_parse_cdb(dev, cmd, buf);
70 if (bus->info->parse_cdb) {
71 rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
73 return rc;
76 static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
77 uint8_t *buf, void *hba_private)
79 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
80 if (sc->alloc_req) {
81 return sc->alloc_req(s, tag, lun, buf, hba_private);
84 return NULL;
87 void scsi_device_unit_attention_reported(SCSIDevice *s)
89 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
90 if (sc->unit_attention_reported) {
91 sc->unit_attention_reported(s);
95 /* Create a scsi bus, and attach devices to it. */
96 void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
97 const SCSIBusInfo *info, const char *bus_name)
99 qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
100 bus->busnr = next_scsi_bus++;
101 bus->info = info;
102 qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
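/*
 * Usage sketch: an HBA embeds a SCSIBus in its state and creates it from
 * its realize function, roughly as in the hypothetical example below;
 * the my_hba_* names are placeholder callbacks, not functions from this
 * file.
 *
 *     static const SCSIBusInfo my_hba_scsi_info = {
 *         .tcq = true,
 *         .max_target = 7,
 *         .max_lun = 0,
 *         .transfer_data = my_hba_transfer_data,
 *         .complete = my_hba_request_complete,
 *     };
 *
 *     scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(s),
 *                  &my_hba_scsi_info, NULL);
 */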
105 static void scsi_dma_restart_bh(void *opaque)
107 SCSIDevice *s = opaque;
108 SCSIRequest *req, *next;
110 qemu_bh_delete(s->bh);
111 s->bh = NULL;
113 aio_context_acquire(blk_get_aio_context(s->conf.blk));
114 QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
115 scsi_req_ref(req);
116 if (req->retry) {
117 req->retry = false;
118 switch (req->cmd.mode) {
119 case SCSI_XFER_FROM_DEV:
120 case SCSI_XFER_TO_DEV:
121 scsi_req_continue(req);
122 break;
123 case SCSI_XFER_NONE:
124 scsi_req_dequeue(req);
125 scsi_req_enqueue(req);
126 break;
129 scsi_req_unref(req);
131 aio_context_release(blk_get_aio_context(s->conf.blk));
134 void scsi_req_retry(SCSIRequest *req)
136 /* No need to save a reference, because scsi_dma_restart_bh just
137 * looks at the request list. */
138 req->retry = true;
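/*
 * A request is typically marked for retry after an I/O error with a
 * rerror/werror "stop" policy (e.g. in scsi-disk); once the guest resumes,
 * the VM change state handler below schedules scsi_dma_restart_bh(), which
 * re-issues every request that has req->retry set.
 */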
141 static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
143 SCSIDevice *s = opaque;
145 if (!running) {
146 return;
148 if (!s->bh) {
149 AioContext *ctx = blk_get_aio_context(s->conf.blk);
150 s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
151 qemu_bh_schedule(s->bh);
155 static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
157 SCSIDevice *dev = SCSI_DEVICE(qdev);
158 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
159 SCSIDevice *d;
160 Error *local_err = NULL;
162 if (dev->channel > bus->info->max_channel) {
163 error_setg(errp, "bad scsi channel id: %d", dev->channel);
164 return;
166 if (dev->id != -1 && dev->id > bus->info->max_target) {
167 error_setg(errp, "bad scsi device id: %d", dev->id);
168 return;
170 if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
171 error_setg(errp, "bad scsi device lun: %d", dev->lun);
172 return;
175 if (dev->id == -1) {
176 int id = -1;
177 if (dev->lun == -1) {
178 dev->lun = 0;
180 do {
181 d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
182 } while (d && d->lun == dev->lun && id < bus->info->max_target);
183 if (d && d->lun == dev->lun) {
184 error_setg(errp, "no free target");
185 return;
187 dev->id = id;
188 } else if (dev->lun == -1) {
189 int lun = -1;
190 do {
191 d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
192 } while (d && d->lun == lun && lun < bus->info->max_lun);
193 if (d && d->lun == lun) {
194 error_setg(errp, "no free lun");
195 return;
197 dev->lun = lun;
198 } else {
199 d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
200 assert(d);
201 if (d->lun == dev->lun && dev != d) {
202 error_setg(errp, "lun already used by '%s'", d->qdev.id);
203 return;
207 QTAILQ_INIT(&dev->requests);
208 scsi_device_realize(dev, &local_err);
209 if (local_err) {
210 error_propagate(errp, local_err);
211 return;
213 dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
214 scsi_dma_restart_cb, dev);
217 static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
219 SCSIDevice *dev = SCSI_DEVICE(qdev);
221 if (dev->vmsentry) {
222 qemu_del_vm_change_state_handler(dev->vmsentry);
225 scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
226 blockdev_mark_auto_del(dev->conf.blk);
229 /* handle legacy '-drive if=scsi,...' cmd line args */
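/* e.g. "-drive if=scsi,bus=0,unit=3,file=disk.img"; the unit number ends up
 * as the scsi-id property of the scsi-disk/scsi-generic device created here. */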
230 SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
231 int unit, bool removable, int bootindex,
232 bool share_rw,
233 BlockdevOnError rerror,
234 BlockdevOnError werror,
235 const char *serial, Error **errp)
237 const char *driver;
238 char *name;
239 DeviceState *dev;
240 Error *err = NULL;
242 driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
243 dev = qdev_create(&bus->qbus, driver);
244 name = g_strdup_printf("legacy[%d]", unit);
245 object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
246 g_free(name);
248 qdev_prop_set_uint32(dev, "scsi-id", unit);
249 if (bootindex >= 0) {
250 object_property_set_int(OBJECT(dev), bootindex, "bootindex",
251 &error_abort);
253 if (object_property_find(OBJECT(dev), "removable", NULL)) {
254 qdev_prop_set_bit(dev, "removable", removable);
256 if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
257 qdev_prop_set_string(dev, "serial", serial);
259 qdev_prop_set_drive(dev, "drive", blk, &err);
260 if (err) {
261 error_propagate(errp, err);
262 object_unparent(OBJECT(dev));
263 return NULL;
265 object_property_set_bool(OBJECT(dev), share_rw, "share-rw", &err);
266 if (err != NULL) {
267 error_propagate(errp, err);
268 object_unparent(OBJECT(dev));
269 return NULL;
272 qdev_prop_set_enum(dev, "rerror", rerror);
273 qdev_prop_set_enum(dev, "werror", werror);
275 object_property_set_bool(OBJECT(dev), true, "realized", &err);
276 if (err != NULL) {
277 error_propagate(errp, err);
278 object_unparent(OBJECT(dev));
279 return NULL;
281 return SCSI_DEVICE(dev);
284 void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
286 Location loc;
287 DriveInfo *dinfo;
288 int unit;
290 loc_push_none(&loc);
291 for (unit = 0; unit <= bus->info->max_target; unit++) {
292 dinfo = drive_get(IF_SCSI, bus->busnr, unit);
293 if (dinfo == NULL) {
294 continue;
296 qemu_opts_loc_restore(dinfo->opts);
297 scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
298 unit, false, -1, false,
299 BLOCKDEV_ON_ERROR_AUTO,
300 BLOCKDEV_ON_ERROR_AUTO,
301 NULL, &error_fatal);
303 loc_pop(&loc);
306 static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
308 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
309 scsi_req_complete(req, CHECK_CONDITION);
310 return 0;
313 static const struct SCSIReqOps reqops_invalid_field = {
314 .size = sizeof(SCSIRequest),
315 .send_command = scsi_invalid_field
318 /* SCSIReqOps implementation for invalid commands. */
320 static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
322 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
323 scsi_req_complete(req, CHECK_CONDITION);
324 return 0;
327 static const struct SCSIReqOps reqops_invalid_opcode = {
328 .size = sizeof(SCSIRequest),
329 .send_command = scsi_invalid_command
332 /* SCSIReqOps implementation for unit attention conditions. */
334 static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
336 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
337 scsi_req_build_sense(req, req->dev->unit_attention);
338 } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
339 scsi_req_build_sense(req, req->bus->unit_attention);
341 scsi_req_complete(req, CHECK_CONDITION);
342 return 0;
345 static const struct SCSIReqOps reqops_unit_attention = {
346 .size = sizeof(SCSIRequest),
347 .send_command = scsi_unit_attention
350 /* SCSIReqOps implementation for REPORT LUNS and for commands sent to
351 an invalid LUN. */
353 typedef struct SCSITargetReq SCSITargetReq;
355 struct SCSITargetReq {
356 SCSIRequest req;
357 int len;
358 uint8_t *buf;
359 int buf_len;
362 static void store_lun(uint8_t *outbuf, int lun)
364 if (lun < 256) {
365 outbuf[1] = lun;
366 return;
368 outbuf[1] = (lun & 255);
369 outbuf[0] = (lun >> 8) | 0x40;
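/*
 * Worked example: store_lun(buf, 5) stores 00 05 (peripheral device
 * addressing) while store_lun(buf, 0x123) stores 41 23 (flat space
 * addressing, hence the 0x40 marker in the first byte); the remaining six
 * bytes of the 8-byte LUN field stay zero because the REPORT LUNS buffer
 * is memset below before the LUNs are stored.
 */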
372 static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
374 BusChild *kid;
375 int i, len, n;
376 int channel, id;
377 bool found_lun0;
379 if (r->req.cmd.xfer < 16) {
380 return false;
382 if (r->req.cmd.buf[2] > 2) {
383 return false;
385 channel = r->req.dev->channel;
386 id = r->req.dev->id;
387 found_lun0 = false;
388 n = 0;
389 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
390 DeviceState *qdev = kid->child;
391 SCSIDevice *dev = SCSI_DEVICE(qdev);
393 if (dev->channel == channel && dev->id == id) {
394 if (dev->lun == 0) {
395 found_lun0 = true;
397 n += 8;
400 if (!found_lun0) {
401 n += 8;
404 scsi_target_alloc_buf(&r->req, n + 8);
406 len = MIN(n + 8, r->req.cmd.xfer & ~7);
407 memset(r->buf, 0, len);
408 stl_be_p(&r->buf[0], n);
409 i = found_lun0 ? 8 : 16;
410 QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
411 DeviceState *qdev = kid->child;
412 SCSIDevice *dev = SCSI_DEVICE(qdev);
414 if (dev->channel == channel && dev->id == id) {
415 store_lun(&r->buf[i], dev->lun);
416 i += 8;
419 assert(i == n + 8);
420 r->len = len;
421 return true;
424 static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
426 assert(r->req.dev->lun != r->req.lun);
428 scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);
430 if (r->req.cmd.buf[1] & 0x2) {
431 /* Command support data - optional, not implemented */
432 return false;
435 if (r->req.cmd.buf[1] & 0x1) {
436 /* Vital product data */
437 uint8_t page_code = r->req.cmd.buf[2];
438 r->buf[r->len++] = page_code ; /* this page */
439 r->buf[r->len++] = 0x00;
441 switch (page_code) {
442 case 0x00: /* Supported page codes, mandatory */
444 int pages;
445 pages = r->len++;
446 r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
447 r->buf[pages] = r->len - pages - 1; /* number of pages */
448 break;
450 default:
451 return false;
453 /* done with EVPD */
454 assert(r->len < r->buf_len);
455 r->len = MIN(r->req.cmd.xfer, r->len);
456 return true;
459 /* Standard INQUIRY data */
460 if (r->req.cmd.buf[2] != 0) {
461 return false;
464 /* PAGE CODE == 0 */
465 r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
466 memset(r->buf, 0, r->len);
467 if (r->req.lun != 0) {
468 r->buf[0] = TYPE_NO_LUN;
469 } else {
470 r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
471 r->buf[2] = 5; /* Version */
472 r->buf[3] = 2 | 0x10; /* HiSup, response data format */
473 r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
474 r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ. */
475 memcpy(&r->buf[8], "QEMU ", 8);
476 memcpy(&r->buf[16], "QEMU TARGET ", 16);
477 pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
479 return true;
482 static size_t scsi_sense_len(SCSIRequest *req)
484 if (req->dev->type == TYPE_SCANNER)
485 return SCSI_SENSE_LEN_SCANNER;
486 else
487 return SCSI_SENSE_LEN;
490 static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
492 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
493 int fixed_sense = (req->cmd.buf[1] & 1) == 0;
495 if (req->lun != 0 &&
496 buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
497 scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
498 scsi_req_complete(req, CHECK_CONDITION);
499 return 0;
501 switch (buf[0]) {
502 case REPORT_LUNS:
503 if (!scsi_target_emulate_report_luns(r)) {
504 goto illegal_request;
506 break;
507 case INQUIRY:
508 if (!scsi_target_emulate_inquiry(r)) {
509 goto illegal_request;
511 break;
512 case REQUEST_SENSE:
513 scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
514 if (req->lun != 0) {
515 const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);
517 r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
518 sense, fixed_sense);
519 } else {
520 r->len = scsi_device_get_sense(r->req.dev, r->buf,
521 MIN(req->cmd.xfer, r->buf_len),
522 fixed_sense);
524 if (r->req.dev->sense_is_ua) {
525 scsi_device_unit_attention_reported(req->dev);
526 r->req.dev->sense_len = 0;
527 r->req.dev->sense_is_ua = false;
529 break;
530 case TEST_UNIT_READY:
531 break;
532 default:
533 scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
534 scsi_req_complete(req, CHECK_CONDITION);
535 return 0;
536 illegal_request:
537 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
538 scsi_req_complete(req, CHECK_CONDITION);
539 return 0;
542 if (!r->len) {
543 scsi_req_complete(req, GOOD);
545 return r->len;
548 static void scsi_target_read_data(SCSIRequest *req)
550 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
551 uint32_t n;
553 n = r->len;
554 if (n > 0) {
555 r->len = 0;
556 scsi_req_data(&r->req, n);
557 } else {
558 scsi_req_complete(&r->req, GOOD);
562 static uint8_t *scsi_target_get_buf(SCSIRequest *req)
564 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
566 return r->buf;
569 static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
571 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
573 r->buf = g_malloc(len);
574 r->buf_len = len;
576 return r->buf;
579 static void scsi_target_free_buf(SCSIRequest *req)
581 SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
583 g_free(r->buf);
586 static const struct SCSIReqOps reqops_target_command = {
587 .size = sizeof(SCSITargetReq),
588 .send_command = scsi_target_send_command,
589 .read_data = scsi_target_read_data,
590 .get_buf = scsi_target_get_buf,
591 .free_req = scsi_target_free_buf,
595 SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
596 uint32_t tag, uint32_t lun, void *hba_private)
598 SCSIRequest *req;
599 SCSIBus *bus = scsi_bus_from_device(d);
600 BusState *qbus = BUS(bus);
601 const int memset_off = offsetof(SCSIRequest, sense)
602 + sizeof(req->sense);
604 req = g_malloc(reqops->size);
605 memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
606 req->refcount = 1;
607 req->bus = bus;
608 req->dev = d;
609 req->tag = tag;
610 req->lun = lun;
611 req->hba_private = hba_private;
612 req->status = -1;
613 req->ops = reqops;
614 object_ref(OBJECT(d));
615 object_ref(OBJECT(qbus->parent));
616 notifier_list_init(&req->cancel_notifiers);
617 trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
618 return req;
621 SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
622 uint8_t *buf, void *hba_private)
624 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
625 const SCSIReqOps *ops;
626 SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
627 SCSIRequest *req;
628 SCSICommand cmd = { .len = 0 };
629 int ret;
631 if ((d->unit_attention.key == UNIT_ATTENTION ||
632 bus->unit_attention.key == UNIT_ATTENTION) &&
633 (buf[0] != INQUIRY &&
634 buf[0] != REPORT_LUNS &&
635 buf[0] != GET_CONFIGURATION &&
636 buf[0] != GET_EVENT_STATUS_NOTIFICATION &&
639 * If we already have a pending unit attention condition,
640 * report this one before triggering another one.
642 !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
643 ops = &reqops_unit_attention;
644 } else if (lun != d->lun ||
645 buf[0] == REPORT_LUNS ||
646 (buf[0] == REQUEST_SENSE && d->sense_len)) {
647 ops = &reqops_target_command;
648 } else {
649 ops = NULL;
652 if (ops != NULL || !sc->parse_cdb) {
653 ret = scsi_req_parse_cdb(d, &cmd, buf);
654 } else {
655 ret = sc->parse_cdb(d, &cmd, buf, hba_private);
658 if (ret != 0) {
659 trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
660 req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
661 } else {
662 assert(cmd.len != 0);
663 trace_scsi_req_parsed(d->id, lun, tag, buf[0],
664 cmd.mode, cmd.xfer);
665 if (cmd.lba != -1) {
666 trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
667 cmd.lba);
670 if (cmd.xfer > INT32_MAX) {
671 req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
672 } else if (ops) {
673 req = scsi_req_alloc(ops, d, tag, lun, hba_private);
674 } else {
675 req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
679 req->cmd = cmd;
680 req->resid = req->cmd.xfer;
682 switch (buf[0]) {
683 case INQUIRY:
684 trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
685 break;
686 case TEST_UNIT_READY:
687 trace_scsi_test_unit_ready(d->id, lun, tag);
688 break;
689 case REPORT_LUNS:
690 trace_scsi_report_luns(d->id, lun, tag);
691 break;
692 case REQUEST_SENSE:
693 trace_scsi_request_sense(d->id, lun, tag);
694 break;
695 default:
696 break;
699 return req;
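/*
 * To illustrate the routing above: REPORT LUNS, REQUEST SENSE for a stored
 * sense, and any command addressed to a LUN the device does not implement
 * are served by the generic reqops_target_command emulation without
 * involving a device model, while a command that arrives with a unit
 * attention pending (other than the exempted opcodes) is answered by
 * reqops_unit_attention with CHECK CONDITION.
 */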
702 uint8_t *scsi_req_get_buf(SCSIRequest *req)
704 return req->ops->get_buf(req);
707 static void scsi_clear_unit_attention(SCSIRequest *req)
709 SCSISense *ua;
710 if (req->dev->unit_attention.key != UNIT_ATTENTION &&
711 req->bus->unit_attention.key != UNIT_ATTENTION) {
712 return;
716 * If an INQUIRY command enters the enabled command state,
717 * the device server shall [not] clear any unit attention condition;
718 * See also MMC-6, paragraphs 6.5 and 6.6.2.
720 if (req->cmd.buf[0] == INQUIRY ||
721 req->cmd.buf[0] == GET_CONFIGURATION ||
722 req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
723 return;
726 if (req->dev->unit_attention.key == UNIT_ATTENTION) {
727 ua = &req->dev->unit_attention;
728 } else {
729 ua = &req->bus->unit_attention;
733 * If a REPORT LUNS command enters the enabled command state, [...]
734 * the device server shall clear any pending unit attention condition
735 * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
737 if (req->cmd.buf[0] == REPORT_LUNS &&
738 !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
739 ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
740 return;
743 *ua = SENSE_CODE(NO_SENSE);
746 int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
748 int ret;
750 assert(len >= 14);
751 if (!req->sense_len) {
752 return 0;
755 ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);
758 * FIXME: clearing unit attention conditions upon autosense should be done
759 * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
760 * (SAM-5, 5.14).
762 * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
763 * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
764 * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
766 if (req->dev->sense_is_ua) {
767 scsi_device_unit_attention_reported(req->dev);
768 req->dev->sense_len = 0;
769 req->dev->sense_is_ua = false;
771 return ret;
774 int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
776 return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
779 void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
781 trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
782 sense.key, sense.asc, sense.ascq);
783 req->sense_len = scsi_build_sense(req->sense, sense);
786 static void scsi_req_enqueue_internal(SCSIRequest *req)
788 assert(!req->enqueued);
789 scsi_req_ref(req);
790 if (req->bus->info->get_sg_list) {
791 req->sg = req->bus->info->get_sg_list(req);
792 } else {
793 req->sg = NULL;
795 req->enqueued = true;
796 QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
799 int32_t scsi_req_enqueue(SCSIRequest *req)
801 int32_t rc;
803 assert(!req->retry);
804 scsi_req_enqueue_internal(req);
805 scsi_req_ref(req);
806 rc = req->ops->send_command(req, req->cmd.buf);
807 scsi_req_unref(req);
808 return rc;
811 static void scsi_req_dequeue(SCSIRequest *req)
813 trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
814 req->retry = false;
815 if (req->enqueued) {
816 QTAILQ_REMOVE(&req->dev->requests, req, next);
817 req->enqueued = false;
818 scsi_req_unref(req);
822 static int scsi_get_performance_length(int num_desc, int type, int data_type)
824 /* MMC-6, paragraph 6.7. */
825 switch (type) {
826 case 0:
827 if ((data_type & 3) == 0) {
828 /* Each descriptor is as in Table 295 - Nominal performance. */
829 return 16 * num_desc + 8;
830 } else {
831 /* Each descriptor is as in Table 296 - Exceptions. */
832 return 6 * num_desc + 8;
834 case 1:
835 case 4:
836 case 5:
837 return 8 * num_desc + 8;
838 case 2:
839 return 2048 * num_desc + 8;
840 case 3:
841 return 16 * num_desc + 8;
842 default:
843 return 8;
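/*
 * Example: a GET PERFORMANCE request for 4 nominal-performance descriptors
 * (type 0, low data_type bits 00b) yields 16 * 4 + 8 = 72 bytes, i.e. the
 * 8-byte header plus four 16-byte descriptors.
 */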
847 static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
849 int byte_block = (buf[2] >> 2) & 0x1;
850 int type = (buf[2] >> 4) & 0x1;
851 int xfer_unit;
853 if (byte_block) {
854 if (type) {
855 xfer_unit = dev->blocksize;
856 } else {
857 xfer_unit = 512;
859 } else {
860 xfer_unit = 1;
863 return xfer_unit;
866 static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
868 int length = buf[2] & 0x3;
869 int xfer;
870 int unit = ata_passthrough_xfer_unit(dev, buf);
872 switch (length) {
873 case 0:
874 case 3: /* USB-specific. */
875 default:
876 xfer = 0;
877 break;
878 case 1:
879 xfer = buf[3];
880 break;
881 case 2:
882 xfer = buf[4];
883 break;
886 return xfer * unit;
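/*
 * Example (ATA PASS-THROUGH(12)): with buf[2] = 0x06 (T_LENGTH = 2,
 * BYTE_BLOCK = 1, bit 4 clear) the transfer unit is 512 bytes and the
 * sector count is taken from buf[4], so buf[4] = 8 gives an
 * 8 * 512 = 4096 byte transfer.
 */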
889 static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
891 int extend = buf[1] & 0x1;
892 int length = buf[2] & 0x3;
893 int xfer;
894 int unit = ata_passthrough_xfer_unit(dev, buf);
896 switch (length) {
897 case 0:
898 case 3: /* USB-specific. */
899 default:
900 xfer = 0;
901 break;
902 case 1:
903 xfer = buf[4];
904 xfer |= (extend ? buf[3] << 8 : 0);
905 break;
906 case 2:
907 xfer = buf[6];
908 xfer |= (extend ? buf[5] << 8 : 0);
909 break;
912 return xfer * unit;
915 static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
917 cmd->xfer = scsi_cdb_xfer(buf);
918 switch (buf[0]) {
919 case TEST_UNIT_READY:
920 case REWIND:
921 case START_STOP:
922 case SET_CAPACITY:
923 case WRITE_FILEMARKS:
924 case WRITE_FILEMARKS_16:
925 case SPACE:
926 case RESERVE:
927 case RELEASE:
928 case ERASE:
929 case ALLOW_MEDIUM_REMOVAL:
930 case SEEK_10:
931 case SYNCHRONIZE_CACHE:
932 case SYNCHRONIZE_CACHE_16:
933 case LOCATE_16:
934 case LOCK_UNLOCK_CACHE:
935 case SET_CD_SPEED:
936 case SET_LIMITS:
937 case WRITE_LONG_10:
938 case UPDATE_BLOCK:
939 case RESERVE_TRACK:
940 case SET_READ_AHEAD:
941 case PRE_FETCH:
942 case PRE_FETCH_16:
943 case ALLOW_OVERWRITE:
944 cmd->xfer = 0;
945 break;
946 case VERIFY_10:
947 case VERIFY_12:
948 case VERIFY_16:
949 if ((buf[1] & 2) == 0) {
950 cmd->xfer = 0;
951 } else if ((buf[1] & 4) != 0) {
952 cmd->xfer = 1;
954 cmd->xfer *= dev->blocksize;
955 break;
956 case MODE_SENSE:
957 break;
958 case WRITE_SAME_10:
959 case WRITE_SAME_16:
960 cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
961 break;
962 case READ_CAPACITY_10:
963 cmd->xfer = 8;
964 break;
965 case READ_BLOCK_LIMITS:
966 cmd->xfer = 6;
967 break;
968 case SEND_VOLUME_TAG:
969 /* GPCMD_SET_STREAMING from multimedia commands. */
970 if (dev->type == TYPE_ROM) {
971 cmd->xfer = buf[10] | (buf[9] << 8);
972 } else {
973 cmd->xfer = buf[9] | (buf[8] << 8);
975 break;
976 case WRITE_6:
977 /* length 0 means 256 blocks */
978 if (cmd->xfer == 0) {
979 cmd->xfer = 256;
981 /* fall through */
982 case WRITE_10:
983 case WRITE_VERIFY_10:
984 case WRITE_12:
985 case WRITE_VERIFY_12:
986 case WRITE_16:
987 case WRITE_VERIFY_16:
988 cmd->xfer *= dev->blocksize;
989 break;
990 case READ_6:
991 case READ_REVERSE:
992 /* length 0 means 256 blocks */
993 if (cmd->xfer == 0) {
994 cmd->xfer = 256;
996 /* fall through */
997 case READ_10:
998 case READ_12:
999 case READ_16:
1000 cmd->xfer *= dev->blocksize;
1001 break;
1002 case FORMAT_UNIT:
1003 /* MMC mandates the parameter list to be 12-bytes long. Parameters
1004 * for block devices are restricted to the header right now. */
1005 if (dev->type == TYPE_ROM && (buf[1] & 16)) {
1006 cmd->xfer = 12;
1007 } else {
1008 cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
1010 break;
1011 case INQUIRY:
1012 case RECEIVE_DIAGNOSTIC:
1013 case SEND_DIAGNOSTIC:
1014 cmd->xfer = buf[4] | (buf[3] << 8);
1015 break;
1016 case READ_CD:
1017 case READ_BUFFER:
1018 case WRITE_BUFFER:
1019 case SEND_CUE_SHEET:
1020 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1021 break;
1022 case PERSISTENT_RESERVE_OUT:
1023 cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
1024 break;
1025 case ERASE_12:
1026 if (dev->type == TYPE_ROM) {
1027 /* MMC command GET PERFORMANCE. */
1028 cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
1029 buf[10], buf[1] & 0x1f);
1031 break;
1032 case MECHANISM_STATUS:
1033 case READ_DVD_STRUCTURE:
1034 case SEND_DVD_STRUCTURE:
1035 case MAINTENANCE_OUT:
1036 case MAINTENANCE_IN:
1037 if (dev->type == TYPE_ROM) {
1038 /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
1039 cmd->xfer = buf[9] | (buf[8] << 8);
1041 break;
1042 case ATA_PASSTHROUGH_12:
1043 if (dev->type == TYPE_ROM) {
1044 /* BLANK command of MMC */
1045 cmd->xfer = 0;
1046 } else {
1047 cmd->xfer = ata_passthrough_12_xfer(dev, buf);
1049 break;
1050 case ATA_PASSTHROUGH_16:
1051 cmd->xfer = ata_passthrough_16_xfer(dev, buf);
1052 break;
1054 return 0;
1057 static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1059 switch (buf[0]) {
1060 /* stream commands */
1061 case ERASE_12:
1062 case ERASE_16:
1063 cmd->xfer = 0;
1064 break;
1065 case READ_6:
1066 case READ_REVERSE:
1067 case RECOVER_BUFFERED_DATA:
1068 case WRITE_6:
1069 cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
1070 if (buf[1] & 0x01) { /* fixed */
1071 cmd->xfer *= dev->blocksize;
1073 break;
1074 case READ_16:
1075 case READ_REVERSE_16:
1076 case VERIFY_16:
1077 case WRITE_16:
1078 cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
1079 if (buf[1] & 0x01) { /* fixed */
1080 cmd->xfer *= dev->blocksize;
1082 break;
1083 case REWIND:
1084 case LOAD_UNLOAD:
1085 cmd->xfer = 0;
1086 break;
1087 case SPACE_16:
1088 cmd->xfer = buf[13] | (buf[12] << 8);
1089 break;
1090 case READ_POSITION:
1091 switch (buf[1] & 0x1f) /* operation code */ {
1092 case SHORT_FORM_BLOCK_ID:
1093 case SHORT_FORM_VENDOR_SPECIFIC:
1094 cmd->xfer = 20;
1095 break;
1096 case LONG_FORM:
1097 cmd->xfer = 32;
1098 break;
1099 case EXTENDED_FORM:
1100 cmd->xfer = buf[8] | (buf[7] << 8);
1101 break;
1102 default:
1103 return -1;
1106 break;
1107 case FORMAT_UNIT:
1108 cmd->xfer = buf[4] | (buf[3] << 8);
1109 break;
1110 /* generic commands */
1111 default:
1112 return scsi_req_xfer(cmd, dev, buf);
1114 return 0;
1117 static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1119 switch (buf[0]) {
1120 /* medium changer commands */
1121 case EXCHANGE_MEDIUM:
1122 case INITIALIZE_ELEMENT_STATUS:
1123 case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
1124 case MOVE_MEDIUM:
1125 case POSITION_TO_ELEMENT:
1126 cmd->xfer = 0;
1127 break;
1128 case READ_ELEMENT_STATUS:
1129 cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
1130 break;
1132 /* generic commands */
1133 default:
1134 return scsi_req_xfer(cmd, dev, buf);
1136 return 0;
1139 static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
1141 switch (buf[0]) {
1142 /* Scanner commands */
1143 case OBJECT_POSITION:
1144 cmd->xfer = 0;
1145 break;
1146 case SCAN:
1147 cmd->xfer = buf[4];
1148 break;
1149 case READ_10:
1150 case SEND:
1151 case GET_WINDOW:
1152 case SET_WINDOW:
1153 cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
1154 break;
1155 default:
1156 /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
1157 return scsi_req_xfer(cmd, dev, buf);
1160 return 0;
1163 static void scsi_cmd_xfer_mode(SCSICommand *cmd)
1165 if (!cmd->xfer) {
1166 cmd->mode = SCSI_XFER_NONE;
1167 return;
1169 switch (cmd->buf[0]) {
1170 case WRITE_6:
1171 case WRITE_10:
1172 case WRITE_VERIFY_10:
1173 case WRITE_12:
1174 case WRITE_VERIFY_12:
1175 case WRITE_16:
1176 case WRITE_VERIFY_16:
1177 case VERIFY_10:
1178 case VERIFY_12:
1179 case VERIFY_16:
1180 case COPY:
1181 case COPY_VERIFY:
1182 case COMPARE:
1183 case CHANGE_DEFINITION:
1184 case LOG_SELECT:
1185 case MODE_SELECT:
1186 case MODE_SELECT_10:
1187 case SEND_DIAGNOSTIC:
1188 case WRITE_BUFFER:
1189 case FORMAT_UNIT:
1190 case REASSIGN_BLOCKS:
1191 case SEARCH_EQUAL:
1192 case SEARCH_HIGH:
1193 case SEARCH_LOW:
1194 case UPDATE_BLOCK:
1195 case WRITE_LONG_10:
1196 case WRITE_SAME_10:
1197 case WRITE_SAME_16:
1198 case UNMAP:
1199 case SEARCH_HIGH_12:
1200 case SEARCH_EQUAL_12:
1201 case SEARCH_LOW_12:
1202 case MEDIUM_SCAN:
1203 case SEND_VOLUME_TAG:
1204 case SEND_CUE_SHEET:
1205 case SEND_DVD_STRUCTURE:
1206 case PERSISTENT_RESERVE_OUT:
1207 case MAINTENANCE_OUT:
1208 case SET_WINDOW:
1209 case SCAN:
1210 /* SCAN conflicts with START_STOP. START_STOP has cmd->xfer set to 0 for
1211 * non-scanner devices, so we only get here for SCAN and not for START_STOP.
1213 cmd->mode = SCSI_XFER_TO_DEV;
1214 break;
1215 case ATA_PASSTHROUGH_12:
1216 case ATA_PASSTHROUGH_16:
1217 /* T_DIR */
1218 cmd->mode = (cmd->buf[2] & 0x8) ?
1219 SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
1220 break;
1221 default:
1222 cmd->mode = SCSI_XFER_FROM_DEV;
1223 break;
1227 int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
1229 int rc;
1230 int len;
1232 cmd->lba = -1;
1233 len = scsi_cdb_length(buf);
1234 if (len < 0) {
1235 return -1;
1238 cmd->len = len;
1239 switch (dev->type) {
1240 case TYPE_TAPE:
1241 rc = scsi_req_stream_xfer(cmd, dev, buf);
1242 break;
1243 case TYPE_MEDIUM_CHANGER:
1244 rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
1245 break;
1246 case TYPE_SCANNER:
1247 rc = scsi_req_scanner_length(cmd, dev, buf);
1248 break;
1249 default:
1250 rc = scsi_req_xfer(cmd, dev, buf);
1251 break;
1254 if (rc != 0)
1255 return rc;
1257 memcpy(cmd->buf, buf, cmd->len);
1258 scsi_cmd_xfer_mode(cmd);
1259 cmd->lba = scsi_cmd_lba(cmd);
1260 return 0;
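/*
 * Example: a READ(10) CDB of 28 00 00 00 00 08 00 00 10 00, parsed for a
 * disk with a 512-byte blocksize, comes out as cmd->len = 10, cmd->lba = 8,
 * cmd->xfer = 16 * 512 = 8192 and cmd->mode = SCSI_XFER_FROM_DEV (the raw
 * transfer length and LBA are extracted by the scsi_cdb_xfer() and
 * scsi_cmd_lba() helpers used above).
 */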
1263 void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
1265 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
1267 scsi_device_set_ua(dev, sense);
1268 if (bus->info->change) {
1269 bus->info->change(bus, dev, sense);
1273 SCSIRequest *scsi_req_ref(SCSIRequest *req)
1275 assert(req->refcount > 0);
1276 req->refcount++;
1277 return req;
1280 void scsi_req_unref(SCSIRequest *req)
1282 assert(req->refcount > 0);
1283 if (--req->refcount == 0) {
1284 BusState *qbus = req->dev->qdev.parent_bus;
1285 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);
1287 if (bus->info->free_request && req->hba_private) {
1288 bus->info->free_request(bus, req->hba_private);
1290 if (req->ops->free_req) {
1291 req->ops->free_req(req);
1293 object_unref(OBJECT(req->dev));
1294 object_unref(OBJECT(qbus->parent));
1295 g_free(req);
1299 /* Tell the device that we finished processing this chunk of I/O. It
1300 will start the next chunk or complete the command. */
1301 void scsi_req_continue(SCSIRequest *req)
1303 if (req->io_canceled) {
1304 trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
1305 return;
1307 trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
1308 if (req->cmd.mode == SCSI_XFER_TO_DEV) {
1309 req->ops->write_data(req);
1310 } else {
1311 req->ops->read_data(req);
1315 /* Called by the devices when data is ready for the HBA. The HBA should
1316 start a DMA operation to read or fill the device's data buffer.
1317 Once it completes, calling scsi_req_continue will restart I/O. */
1318 void scsi_req_data(SCSIRequest *req, int len)
1320 uint8_t *buf;
1321 if (req->io_canceled) {
1322 trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
1323 return;
1325 trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
1326 assert(req->cmd.mode != SCSI_XFER_NONE);
1327 if (!req->sg) {
1328 req->resid -= len;
1329 req->bus->info->transfer_data(req, len);
1330 return;
1333 /* If the device calls scsi_req_data and the HBA specified a
1334 * scatter/gather list, the transfer has to happen in a single
1335 * step. */
1336 assert(!req->dma_started);
1337 req->dma_started = true;
1339 buf = scsi_req_get_buf(req);
1340 if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
1341 req->resid = dma_buf_read(buf, len, req->sg);
1342 } else {
1343 req->resid = dma_buf_write(buf, len, req->sg);
1345 scsi_req_continue(req);
1348 void scsi_req_print(SCSIRequest *req)
1350 FILE *fp = stderr;
1351 int i;
1353 fprintf(fp, "[%s id=%d] %s",
1354 req->dev->qdev.parent_bus->name,
1355 req->dev->id,
1356 scsi_command_name(req->cmd.buf[0]));
1357 for (i = 1; i < req->cmd.len; i++) {
1358 fprintf(fp, " 0x%02x", req->cmd.buf[i]);
1360 switch (req->cmd.mode) {
1361 case SCSI_XFER_NONE:
1362 fprintf(fp, " - none\n");
1363 break;
1364 case SCSI_XFER_FROM_DEV:
1365 fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
1366 break;
1367 case SCSI_XFER_TO_DEV:
1368 fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
1369 break;
1370 default:
1371 fprintf(fp, " - Oops\n");
1372 break;
1376 void scsi_req_complete(SCSIRequest *req, int status)
1378 assert(req->status == -1);
1379 req->status = status;
1381 assert(req->sense_len <= sizeof(req->sense));
1382 if (status == GOOD) {
1383 req->sense_len = 0;
1386 if (req->sense_len) {
1387 memcpy(req->dev->sense, req->sense, req->sense_len);
1388 req->dev->sense_len = req->sense_len;
1389 req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
1390 } else {
1391 req->dev->sense_len = 0;
1392 req->dev->sense_is_ua = false;
1396 * Unit attention state is now stored in the device's sense buffer
1397 * if the HBA didn't do autosense. Clear the pending unit attention
1398 * flags.
1400 scsi_clear_unit_attention(req);
1402 scsi_req_ref(req);
1403 scsi_req_dequeue(req);
1404 req->bus->info->complete(req, req->status, req->resid);
1406 /* Cancelled requests might end up being completed instead of cancelled */
1407 notifier_list_notify(&req->cancel_notifiers, req);
1408 scsi_req_unref(req);
1411 /* Called by the devices when the request is canceled. */
1412 void scsi_req_cancel_complete(SCSIRequest *req)
1414 assert(req->io_canceled);
1415 if (req->bus->info->cancel) {
1416 req->bus->info->cancel(req);
1418 notifier_list_notify(&req->cancel_notifiers, req);
1419 scsi_req_unref(req);
1422 /* Cancel @req asynchronously. @notifier is added to @req's cancellation
1423 * notifier list, the bus will be notified the requests cancellation is
1424 * completed.
1425 * */
1426 void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
1428 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1429 if (notifier) {
1430 notifier_list_add(&req->cancel_notifiers, notifier);
1432 if (req->io_canceled) {
1433 /* A blk_aio_cancel_async is pending; when it finishes,
1434 * scsi_req_cancel_complete will be called and will
1435 * call the notifier we just added. Just wait for that.
1437 assert(req->aiocb);
1438 return;
1440 /* Dropped in scsi_req_cancel_complete. */
1441 scsi_req_ref(req);
1442 scsi_req_dequeue(req);
1443 req->io_canceled = true;
1444 if (req->aiocb) {
1445 blk_aio_cancel_async(req->aiocb);
1446 } else {
1447 scsi_req_cancel_complete(req);
1451 void scsi_req_cancel(SCSIRequest *req)
1453 trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
1454 if (!req->enqueued) {
1455 return;
1457 assert(!req->io_canceled);
1458 /* Dropped in scsi_req_cancel_complete. */
1459 scsi_req_ref(req);
1460 scsi_req_dequeue(req);
1461 req->io_canceled = true;
1462 if (req->aiocb) {
1463 blk_aio_cancel(req->aiocb);
1464 } else {
1465 scsi_req_cancel_complete(req);
1469 static int scsi_ua_precedence(SCSISense sense)
1471 if (sense.key != UNIT_ATTENTION) {
1472 return INT_MAX;
1474 if (sense.asc == 0x29 && sense.ascq == 0x04) {
1475 /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
1476 return 1;
1477 } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
1478 /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
1479 return 2;
1480 } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
1481 /* These two go with "all others". */
1483 } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
1484 /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
1485 * POWER ON OCCURRED = 1
1486 * SCSI BUS RESET OCCURRED = 2
1487 * BUS DEVICE RESET FUNCTION OCCURRED = 3
1488 * I_T NEXUS LOSS OCCURRED = 7
1490 return sense.ascq;
1491 } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
1492 /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
1493 return 8;
1495 return (sense.asc << 8) | sense.ascq;
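/*
 * Lower return values win: in scsi_device_set_ua() below, a pending
 * REPORTED LUNS DATA HAS CHANGED condition (precedence 0x3f0e) is
 * overridden by a reset unit attention such as POWER ON OCCURRED
 * (asc 0x29, precedence equal to its ascq), but never the other way
 * around.
 */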
1498 void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
1500 int prec1, prec2;
1501 if (sense.key != UNIT_ATTENTION) {
1502 return;
1504 trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
1505 sense.asc, sense.ascq);
1508 * Override a pre-existing unit attention condition, except for a more
1509 * important reset condition.
1511 prec1 = scsi_ua_precedence(sdev->unit_attention);
1512 prec2 = scsi_ua_precedence(sense);
1513 if (prec2 < prec1) {
1514 sdev->unit_attention = sense;
1518 void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
1520 SCSIRequest *req;
1522 aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
1523 while (!QTAILQ_EMPTY(&sdev->requests)) {
1524 req = QTAILQ_FIRST(&sdev->requests);
1525 scsi_req_cancel_async(req, NULL);
1527 blk_drain(sdev->conf.blk);
1528 aio_context_release(blk_get_aio_context(sdev->conf.blk));
1529 scsi_device_set_ua(sdev, sense);
1532 static char *scsibus_get_dev_path(DeviceState *dev)
1534 SCSIDevice *d = SCSI_DEVICE(dev);
1535 DeviceState *hba = dev->parent_bus->parent;
1536 char *id;
1537 char *path;
1539 id = qdev_get_dev_path(hba);
1540 if (id) {
1541 path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
1542 } else {
1543 path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
1545 g_free(id);
1546 return path;
1549 static char *scsibus_get_fw_dev_path(DeviceState *dev)
1551 SCSIDevice *d = SCSI_DEVICE(dev);
1552 return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
1553 qdev_fw_name(dev), d->id, d->lun);
1556 SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
1558 BusChild *kid;
1559 SCSIDevice *target_dev = NULL;
1561 QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, sibling) {
1562 DeviceState *qdev = kid->child;
1563 SCSIDevice *dev = SCSI_DEVICE(qdev);
1565 if (dev->channel == channel && dev->id == id) {
1566 if (dev->lun == lun) {
1567 return dev;
1569 target_dev = dev;
1572 return target_dev;
1575 /* SCSI request list. For simplicity, pv points to the whole device */
1577 static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
1578 const VMStateField *field, QJSON *vmdesc)
1580 SCSIDevice *s = pv;
1581 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1582 SCSIRequest *req;
1584 QTAILQ_FOREACH(req, &s->requests, next) {
1585 assert(!req->io_canceled);
1586 assert(req->status == -1);
1587 assert(req->enqueued);
1589 qemu_put_sbyte(f, req->retry ? 1 : 2);
1590 qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
1591 qemu_put_be32s(f, &req->tag);
1592 qemu_put_be32s(f, &req->lun);
1593 if (bus->info->save_request) {
1594 bus->info->save_request(f, req);
1596 if (req->ops->save_request) {
1597 req->ops->save_request(f, req);
1600 qemu_put_sbyte(f, 0);
1602 return 0;
1605 static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
1606 const VMStateField *field)
1608 SCSIDevice *s = pv;
1609 SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
1610 int8_t sbyte;
1612 while ((sbyte = qemu_get_sbyte(f)) > 0) {
1613 uint8_t buf[SCSI_CMD_BUF_SIZE];
1614 uint32_t tag;
1615 uint32_t lun;
1616 SCSIRequest *req;
1618 qemu_get_buffer(f, buf, sizeof(buf));
1619 qemu_get_be32s(f, &tag);
1620 qemu_get_be32s(f, &lun);
1621 req = scsi_req_new(s, tag, lun, buf, NULL);
1622 req->retry = (sbyte == 1);
1623 if (bus->info->load_request) {
1624 req->hba_private = bus->info->load_request(f, req);
1626 if (req->ops->load_request) {
1627 req->ops->load_request(f, req);
1630 /* Just restart it later. */
1631 scsi_req_enqueue_internal(req);
1633 /* At this point, the request will be kept alive by the reference
1634 * added by scsi_req_enqueue_internal, so we can release our reference.
1635 * The HBA of course will add its own reference in the load_request
1636 * callback if it needs to hold on the SCSIRequest.
1638 scsi_req_unref(req);
1641 return 0;
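/*
 * The stream format written by put_scsi_requests() above is, per request:
 * a marker byte (1 = retry, 2 = no retry), the fixed-size CDB buffer, the
 * tag and LUN as big-endian 32-bit values, then the optional HBA and
 * request-specific payloads; a marker byte of 0 terminates the list.
 */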
1644 static const VMStateInfo vmstate_info_scsi_requests = {
1645 .name = "scsi-requests",
1646 .get = get_scsi_requests,
1647 .put = put_scsi_requests,
1650 static bool scsi_sense_state_needed(void *opaque)
1652 SCSIDevice *s = opaque;
1654 return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
1657 static const VMStateDescription vmstate_scsi_sense_state = {
1658 .name = "SCSIDevice/sense",
1659 .version_id = 1,
1660 .minimum_version_id = 1,
1661 .needed = scsi_sense_state_needed,
1662 .fields = (VMStateField[]) {
1663 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
1664 SCSI_SENSE_BUF_SIZE_OLD,
1665 SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
1666 VMSTATE_END_OF_LIST()
1670 const VMStateDescription vmstate_scsi_device = {
1671 .name = "SCSIDevice",
1672 .version_id = 1,
1673 .minimum_version_id = 1,
1674 .fields = (VMStateField[]) {
1675 VMSTATE_UINT8(unit_attention.key, SCSIDevice),
1676 VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
1677 VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
1678 VMSTATE_BOOL(sense_is_ua, SCSIDevice),
1679 VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
1680 VMSTATE_UINT32(sense_len, SCSIDevice),
1682 .name = "requests",
1683 .version_id = 0,
1684 .field_exists = NULL,
1685 .size = 0, /* ouch */
1686 .info = &vmstate_info_scsi_requests,
1687 .flags = VMS_SINGLE,
1688 .offset = 0,
1690 VMSTATE_END_OF_LIST()
1692 .subsections = (const VMStateDescription*[]) {
1693 &vmstate_scsi_sense_state,
1694 NULL
1698 static void scsi_device_class_init(ObjectClass *klass, void *data)
1700 DeviceClass *k = DEVICE_CLASS(klass);
1701 set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
1702 k->bus_type = TYPE_SCSI_BUS;
1703 k->realize = scsi_qdev_realize;
1704 k->unrealize = scsi_qdev_unrealize;
1705 k->props = scsi_props;
1708 static void scsi_dev_instance_init(Object *obj)
1710 DeviceState *dev = DEVICE(obj);
1711 SCSIDevice *s = SCSI_DEVICE(dev);
1713 device_add_bootindex_property(obj, &s->conf.bootindex,
1714 "bootindex", NULL,
1715 &s->qdev, NULL);
1718 static const TypeInfo scsi_device_type_info = {
1719 .name = TYPE_SCSI_DEVICE,
1720 .parent = TYPE_DEVICE,
1721 .instance_size = sizeof(SCSIDevice),
1722 .abstract = true,
1723 .class_size = sizeof(SCSIDeviceClass),
1724 .class_init = scsi_device_class_init,
1725 .instance_init = scsi_dev_instance_init,
1728 static void scsi_register_types(void)
1730 type_register_static(&scsi_bus_info);
1731 type_register_static(&scsi_device_type_info);
1734 type_init(scsi_register_types)