/*
 * Copyright IBM, Corp. 2007
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <qemu-common.h>

#include "virtio-blk.h"
#include "block_int.h"

typedef struct VirtIOBlock
{
    VirtIODevice vdev;          /* generic virtio device state */
    BlockDriverState *bs;       /* backing block device */
    VirtQueue *vq;              /* the single request queue */
    void *rq;                   /* requests parked after a write error */
} VirtIOBlock;

static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
{
    return (VirtIOBlock *)vdev;
}

typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;
    VirtQueueElement elem;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr *out;
    QEMUIOVector qiov;
    struct VirtIOBlockReq *next;
} VirtIOBlockReq;

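/*
 * Complete a request: the status byte lives in the virtio_blk_inhdr that
 * sits in the last in_sg element.  The element is pushed back onto the
 * virtqueue with the number of bytes written (payload plus in-header) and
 * the guest is notified.
 */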
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    req->in->status = status;
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);

    /* the request is no longer needed once the guest has been notified */
    qemu_free(req);
}

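/*
 * Handle a failed write according to the policy returned by
 * drive_get_onerror(): ignore the error, stop the VM (always, or only on
 * ENOSPC), or complete the request with VIRTIO_BLK_S_IOERR.  A non-zero
 * return tells the caller the request has already been taken care of.
 */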
static int virtio_blk_handle_write_error(VirtIOBlockReq *req, int error)
{
    BlockInterfaceErrorAction action = drive_get_onerror(req->dev->bs);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERR_IGNORE)
        return 0;

    if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
            || action == BLOCK_ERR_STOP_ANY) {
        /* Park the request on the per-device list and stop the VM; it is
         * resubmitted by virtio_blk_dma_restart_cb() on resume. */
        req->next = s->rq;
        s->rq = req;
        vm_stop(0);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
    }

    return 1;
}

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret && (req->out->type & VIRTIO_BLK_T_OUT)) {
        if (virtio_blk_handle_write_error(req, -ret))
            return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
}

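/*
 * Request allocation: each descriptor chain popped from the virtqueue is
 * wrapped in a zero-initialised VirtIOBlockReq.  If the queue is empty the
 * freshly allocated request is released again and NULL is returned.
 */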
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = qemu_mallocz(sizeof(*req));
    if (req != NULL)
        req->dev = s;
    return req;
}

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (req != NULL) {
        if (!virtqueue_pop(s->vq, &req->elem)) {
            /* nothing pending on the queue */
            qemu_free(req);
            return NULL;
        }
    }

    return req;
}

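/*
 * Reads and writes are submitted asynchronously.  The transfer length is
 * the size of the attached iovec, expressed in 512-byte sectors, and
 * virtio_blk_rw_complete() finishes the request when the AIO completes.
 */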
static void virtio_blk_handle_write(VirtIOBlockReq *req)
{
    bdrv_aio_writev(req->dev->bs, req->out->sector, &req->qiov,
                    req->qiov.size / 512, virtio_blk_rw_complete, req);
}

static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    bdrv_aio_readv(req->dev->bs, req->out->sector, &req->qiov,
                   req->qiov.size / 512, virtio_blk_rw_complete, req);
}

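/*
 * Virtqueue handler.  Each request carries a virtio_blk_outhdr in its first
 * out_sg element and a virtio_blk_inhdr (the status byte) in its last in_sg
 * element; the buffers in between hold the data.  SCSI passthrough requests
 * are rejected as unsupported.
 */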
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;

    while ((req = virtio_blk_get_request(s))) {
        if (req->elem.out_num < 1 || req->elem.in_num < 1) {
            fprintf(stderr, "virtio-blk missing headers\n");
            exit(1);
        }

        if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
            req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
            fprintf(stderr, "virtio-blk header not in correct element\n");
            exit(1);
        }

        req->out = (void *)req->elem.out_sg[0].iov_base;
        req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

        if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
            unsigned int len = sizeof(*req->in);

            /* SCSI passthrough is not implemented; fail the request */
            req->in->status = VIRTIO_BLK_S_UNSUPP;
            virtqueue_push(vq, &req->elem, len);
            virtio_notify(vdev, vq);
            qemu_free(req);
        } else if (req->out->type & VIRTIO_BLK_T_OUT) {
            qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                     req->elem.out_num - 1);
            virtio_blk_handle_write(req);
        } else {
            qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
                                     req->elem.in_num - 1);
            virtio_blk_handle_read(req);
        }
    }
    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible.  But
     * that should be done in the generic block layer.
     */
}

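/*
 * VM state-change handler: when the VM is resumed after being stopped by a
 * write error, resubmit every request that was parked on s->rq.
 */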
static void virtio_blk_dma_restart_cb(void *opaque, int running, int reason)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    if (!running)
        return;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_write(req);
        req = req->next;
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    qemu_aio_flush();
}

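/*
 * Fill in the virtio config space: capacity in 512-byte sectors, the
 * maximum number of data segments per request, and the drive geometry
 * hint (cylinders/heads/sectors).
 */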
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int cylinders, heads, secs;

    bdrv_get_geometry(s->bs, &capacity);
    bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
    stq_raw(&blkcfg.capacity, capacity);
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, cylinders);
    blkcfg.heads = heads;
    blkcfg.sectors = secs;
    memcpy(config, &blkcfg, sizeof(blkcfg));
}

static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
{
    return (1 << VIRTIO_BLK_F_SEG_MAX | 1 << VIRTIO_BLK_F_GEOMETRY);
}

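/*
 * Migration: after the generic virtio state, every request still parked on
 * s->rq is written as a 0x01 marker byte followed by its VirtQueueElement;
 * a 0x00 byte terminates the list.  virtio_blk_load() rebuilds the list so
 * the writes can be resubmitted on the destination.
 */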
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    virtio_save(&s->vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;

    if (version_id != 2)
        return -EINVAL;

    virtio_load(&s->vdev, f);
    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}

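/*
 * Create the virtio-blk PCI device, wire up the config/features/reset
 * callbacks, size the drive geometry, add the single 128-entry virtqueue,
 * and register the state-change handler and savevm hooks.
 */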
void *virtio_blk_init(PCIBus *bus, BlockDriverState *bs)
{
    VirtIOBlock *s;
    int cylinders, heads, secs;
    static int virtio_blk_id;

    s = (VirtIOBlock *)virtio_init_pci(bus, "virtio-blk",
                                       PCI_VENDOR_ID_REDHAT_QUMRANET,
                                       PCI_DEVICE_ID_VIRTIO_BLOCK,
                                       PCI_VENDOR_ID_REDHAT_QUMRANET,
                                       VIRTIO_ID_BLOCK,
                                       PCI_CLASS_STORAGE_OTHER, 0x00,
                                       sizeof(struct virtio_blk_config),
                                       sizeof(VirtIOBlock));
    if (!s)
        return NULL;

    s->vdev.get_config = virtio_blk_update_config;
    s->vdev.get_features = virtio_blk_get_features;
    s->vdev.reset = virtio_blk_reset;
    s->bs = bs;
    s->rq = NULL;
    bs->private = &s->vdev.pci_dev;
    bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
    bdrv_set_geometry_hint(s->bs, cylinders, heads, secs);

    s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);

    qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm("virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);

    return s;
}