/*
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <qemu-common.h>
#include <sysemu.h>
#include "virtio-blk.h"
#include "block_int.h"
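
/* Per-device state.  The VirtIODevice header is embedded first so the
 * vdev pointer handed to the callbacks below can be cast straight back
 * to VirtIOBlock (see to_virtio_blk()).  rq is the list of requests
 * parked while the VM is stopped on a write error. */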
typedef struct VirtIOBlock
{
    VirtIODevice vdev;
    BlockDriverState *bs;
    VirtQueue *vq;
    void *rq;
} VirtIOBlock;
static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
{
    return (VirtIOBlock *)vdev;
}
typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;
    VirtQueueElement elem;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr *out;
    size_t size;
    uint8_t *buffer;
    struct VirtIOBlockReq *next;
} VirtIOBlockReq;
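
/* Write the status byte into the request's in header, push the used
 * element back onto the virtqueue, notify the guest, and free the
 * request's bounce buffer and the request itself. */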
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    req->in->status = status;
    virtqueue_push(s->vq, &req->elem, req->size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);

    qemu_free(req->buffer);
    qemu_free(req);
}
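
/* Handle a failed write according to the drive's configured error action
 * (drive_get_onerror()): ignore it, park the request on s->rq and stop the
 * VM so it can be retried on resume, or report VIRTIO_BLK_S_IOERR to the
 * guest.  Returns non-zero when the caller must not complete the request
 * itself. */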
static int virtio_blk_handle_write_error(VirtIOBlockReq *req, int error)
{
    BlockInterfaceErrorAction action = drive_get_onerror(req->dev->bs);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERR_IGNORE)
        return 0;

    if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
            || action == BLOCK_ERR_STOP_ANY) {
        req->next = s->rq;
        s->rq = req;
        vm_stop(0);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
    }

    return 1;
}
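
/* Completion callback shared by reads and writes.  On a successful read,
 * copy the bounce buffer back into the guest's in descriptors (capped by
 * iov_len so a malicious guest cannot make us overrun them); on a failed
 * write, defer to the error policy; otherwise complete the request. */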
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    /* Copy read data to the guest */
    if (!ret && !(req->out->type & VIRTIO_BLK_T_OUT)) {
        size_t offset = 0;
        int i;

        for (i = 0; i < req->elem.in_num - 1; i++) {
            size_t len;

            /* Be pretty defensive wrt malicious guests */
            len = MIN(req->elem.in_sg[i].iov_len,
                      req->size - offset);

            memcpy(req->elem.in_sg[i].iov_base,
                   req->buffer + offset,
                   len);
            offset += len;
        }
    } else if (ret && (req->out->type & VIRTIO_BLK_T_OUT)) {
        if (virtio_blk_handle_write_error(req, -ret))
            return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
}
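
/* Request constructors: virtio_blk_alloc_request() returns a zeroed
 * request bound to the device; virtio_blk_get_request() additionally pops
 * the next available element off the virtqueue, or returns NULL if the
 * queue is empty. */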
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = qemu_mallocz(sizeof(*req));
    req->dev = s;
    return req;
}
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (req != NULL) {
        if (!virtqueue_pop(s->vq, &req->elem)) {
            qemu_free(req);
            return NULL;
        }
    }

    return req;
}
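
/* Submit a guest write: size the data descriptors, gather them into a
 * single 512-byte-aligned bounce buffer (skipped when a buffer is already
 * attached, as for requests replayed after a stop), and start the
 * asynchronous write.  Returns -1 if the buffer cannot be allocated. */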
static int virtio_blk_handle_write(VirtIOBlockReq *req)
{
    if (!req->buffer) {
        size_t offset = 0;
        int i;

        for (i = 1; i < req->elem.out_num; i++)
            req->size += req->elem.out_sg[i].iov_len;

        req->buffer = qemu_memalign(512, req->size);
        if (req->buffer == NULL) {
            qemu_free(req);
            return -1;
        }

        /* We copy the data from the SG list to avoid splitting up the request.
           This helps performance a lot until we can pass full sg lists as AIO
           operations */
        for (i = 1; i < req->elem.out_num; i++) {
            size_t len;

            len = MIN(req->elem.out_sg[i].iov_len,
                      req->size - offset);
            memcpy(req->buffer + offset,
                   req->elem.out_sg[i].iov_base,
                   len);
            offset += len;
        }
    }

    bdrv_aio_write(req->dev->bs, req->out->sector, req->buffer, req->size / 512,
                   virtio_blk_rw_complete, req);

    return 0;
}
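
/* Virtqueue notification handler.  Pop each pending request, sanity-check
 * that the out header and the final in (status) descriptor are present and
 * large enough, then dispatch: SCSI commands are rejected as unsupported,
 * writes go through virtio_blk_handle_write(), and reads are gathered into
 * a bounce buffer and submitted with bdrv_aio_read(). */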
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;

    while ((req = virtio_blk_get_request(s))) {
        int i;

        if (req->elem.out_num < 1 || req->elem.in_num < 1) {
            fprintf(stderr, "virtio-blk missing headers\n");
            exit(1);
        }

        if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
            req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
            fprintf(stderr, "virtio-blk header not in correct element\n");
            exit(1);
        }

        req->out = (void *)req->elem.out_sg[0].iov_base;
        req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

        if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
            unsigned int len = sizeof(*req->in);

            req->in->status = VIRTIO_BLK_S_UNSUPP;
            virtqueue_push(vq, &req->elem, len);
            virtio_notify(vdev, vq);
            qemu_free(req);
        } else if (req->out->type & VIRTIO_BLK_T_OUT) {
            if (virtio_blk_handle_write(req) < 0)
                break;
        } else {
            for (i = 0; i < req->elem.in_num - 1; i++)
                req->size += req->elem.in_sg[i].iov_len;

            req->buffer = qemu_memalign(512, req->size);
            if (req->buffer == NULL) {
                qemu_free(req);
                break;
            }

            bdrv_aio_read(s->bs, req->out->sector,
                          req->buffer,
                          req->size / 512,
                          virtio_blk_rw_complete,
                          req);
        }
    }
    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}
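
/* VM state change handler.  When the VM resumes after having been stopped
 * by a write error, re-submit every request that was parked on s->rq. */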
static void virtio_blk_dma_restart_cb(void *opaque, int running, int reason)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    if (!running)
        return;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_write(req);
        req = req->next;
    }
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    qemu_aio_flush();
}
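
/* Fill in the virtio-blk config space: capacity in 512-byte sectors, the
 * maximum number of segments per request, and the disk geometry hint. */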
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int cylinders, heads, secs;

    bdrv_get_geometry(s->bs, &capacity);
    bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
    stq_raw(&blkcfg.capacity, capacity);
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, cylinders);
    blkcfg.heads = heads;
    blkcfg.sectors = secs;
    memcpy(config, &blkcfg, sizeof(blkcfg));
}
static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
{
    return (1 << VIRTIO_BLK_F_SEG_MAX | 1 << VIRTIO_BLK_F_GEOMETRY);
}
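
/* Migration support.  After the common virtio state, the save handler
 * writes each request still parked on s->rq as a one-byte marker followed
 * by its VirtQueueElement; the load handler reads them back and rebuilds
 * the list so the writes can be replayed on resume. */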
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    virtio_save(&s->vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}
static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;

    if (version_id != 2)
        return -EINVAL;

    virtio_load(&s->vdev, f);
    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}
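
/* Create and register the virtio-blk PCI device: initialize the PCI/virtio
 * core state, hook up the config, feature, and reset callbacks, guess and
 * record the disk geometry, add the single 128-entry virtqueue, and
 * register the VM-state-change and savevm handlers. */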
void *virtio_blk_init(PCIBus *bus, BlockDriverState *bs)
{
    VirtIOBlock *s;
    int cylinders, heads, secs;
    static int virtio_blk_id;

    s = (VirtIOBlock *)virtio_init_pci(bus, "virtio-blk",
                                       PCI_VENDOR_ID_REDHAT_QUMRANET,
                                       PCI_DEVICE_ID_VIRTIO_BLOCK,
                                       PCI_VENDOR_ID_REDHAT_QUMRANET,
                                       VIRTIO_ID_BLOCK,
                                       PCI_CLASS_STORAGE_OTHER, 0x00,
                                       sizeof(struct virtio_blk_config), sizeof(VirtIOBlock));
    if (!s)
        return NULL;

    s->vdev.get_config = virtio_blk_update_config;
    s->vdev.get_features = virtio_blk_get_features;
    s->vdev.reset = virtio_blk_reset;
    s->bs = bs;
    s->rq = NULL;
    bs->devfn = s->vdev.pci_dev.devfn;
    bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
    bdrv_set_geometry_hint(s->bs, cylinders, heads, secs);

    s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);

    qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm("virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);

    return s;
}