5 #include <qemu-common.h>
7 #include "qemu-vio-blk.h"
/* Program name, defined by the main module (used in diagnostics). */
extern char *progname;

/*
 * Global I/O statistics for the tool's final report.  Maintained by
 * qemu_vio_blk_req_complete() as requests finish.
 */
extern unsigned long int num_reads;
extern unsigned long int num_writes;
extern unsigned long int num_scsi_cmds;
extern unsigned long int num_flushs;
extern unsigned long int bytes_wrote;
extern unsigned long int bytes_read;

/*
 * Count of asynchronous write completions observed since the last
 * multiwrite submission; do_aio_multiwrite() spins on this until all
 * submitted writes have completed.  `char` is wide enough because at
 * most 32 writes are ever batched.
 */
char multiwrite_async_ret = 0;

//#define trace() printf("qemu-vio-blk: %s-%s: %s(%d)\n",__TIME__, __FILE__,__func__, __LINE__)
27 static void qemu_vio_blk_req_complete(VirtIOBlockReq
*req
, int status
)
30 if (req
->out
->type
& VIRTIO_BLK_T_FLUSH
) {
31 bytes_wrote
+=req
->qiov
.size
;
32 } else if (req
->out
->type
& VIRTIO_BLK_T_SCSI_CMD
) {
33 bytes_wrote
+=req
->qiov
.size
;
34 } else if (req
->out
->type
& VIRTIO_BLK_T_OUT
) {
35 bytes_wrote
+=req
->qiov
.size
;
37 bytes_read
+=req
->qiov
.size
;
40 VirtIOBlock
*s
= req
->dev
;
42 req
->in
->status
= status
;
43 virtqueue_push(s
->vq
, &req
->elem
, req
->qiov
.size
+ sizeof(*req
->in
));
49 static void qemu_vio_blk_flush_complete(void *opaque
, int ret
)
51 VirtIOBlockReq
*req
= opaque
;
53 qemu_vio_blk_req_complete(req
, ret
? VIRTIO_BLK_S_IOERR
: VIRTIO_BLK_S_OK
);
57 static void qemu_vio_blk_handle_scsi(VirtIOBlockReq
*req
)
65 * We require at least one output segment each for the virtio_blk_outhdr
66 * and the SCSI command block.
68 * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
69 * and the sense buffer pointer in the input segments.
71 if (req
->elem
.out_num
< 2 || req
->elem
.in_num
< 3) {
72 qemu_vio_blk_req_complete(req
, VIRTIO_BLK_S_IOERR
);
77 * No support for bidirection commands yet.
79 if (req
->elem
.out_num
> 2 && req
->elem
.in_num
> 3) {
80 qemu_vio_blk_req_complete(req
, VIRTIO_BLK_S_UNSUPP
);
85 * The scsi inhdr is placed in the second-to-last input segment, just
86 * before the regular inhdr.
88 req
->scsi
= (void *)req
->elem
.in_sg
[req
->elem
.in_num
- 2].iov_base
;
89 size
= sizeof(*req
->in
) + sizeof(*req
->scsi
);
91 memset(&hdr
, 0, sizeof(struct sg_io_hdr
));
92 hdr
.interface_id
= 'S';
93 hdr
.cmd_len
= req
->elem
.out_sg
[1].iov_len
;
94 hdr
.cmdp
= req
->elem
.out_sg
[1].iov_base
;
97 if (req
->elem
.out_num
> 2) {
99 * If there are more than the minimally required 2 output segments
100 * there is write payload starting from the third iovec.
102 hdr
.dxfer_direction
= SG_DXFER_TO_DEV
;
103 hdr
.iovec_count
= req
->elem
.out_num
- 2;
105 for (i
= 0; i
< hdr
.iovec_count
; i
++)
106 hdr
.dxfer_len
+= req
->elem
.out_sg
[i
+ 2].iov_len
;
108 hdr
.dxferp
= req
->elem
.out_sg
+ 2;
110 } else if (req
->elem
.in_num
> 3) {
112 * If we have more than 3 input segments the guest wants to actually
115 hdr
.dxfer_direction
= SG_DXFER_FROM_DEV
;
116 hdr
.iovec_count
= req
->elem
.in_num
- 3;
117 for (i
= 0; i
< hdr
.iovec_count
; i
++)
118 hdr
.dxfer_len
+= req
->elem
.in_sg
[i
].iov_len
;
120 hdr
.dxferp
= req
->elem
.in_sg
;
121 size
+= hdr
.dxfer_len
;
124 * Some SCSI commands don't actually transfer any data.
126 hdr
.dxfer_direction
= SG_DXFER_NONE
;
129 hdr
.sbp
= req
->elem
.in_sg
[req
->elem
.in_num
- 3].iov_base
;
130 hdr
.mx_sb_len
= req
->elem
.in_sg
[req
->elem
.in_num
- 3].iov_len
;
131 size
+= hdr
.mx_sb_len
;
133 ret
= bdrv_ioctl(req
->dev
->bs
, SG_IO
, &hdr
);
135 status
= VIRTIO_BLK_S_UNSUPP
;
137 hdr
.resid
= hdr
.dxfer_len
;
138 } else if (hdr
.status
) {
139 status
= VIRTIO_BLK_S_IOERR
;
141 status
= VIRTIO_BLK_S_OK
;
144 req
->scsi
->errors
= hdr
.status
;
145 req
->scsi
->residual
= hdr
.resid
;
146 req
->scsi
->sense_len
= hdr
.sb_len_wr
;
147 req
->scsi
->data_len
= hdr
.dxfer_len
;
149 qemu_vio_blk_req_complete(req
, status
);
152 static void qemu_vio_blk_handle_scsi(VirtIOBlockReq
*req
)
154 qemu_vio_blk_req_complete(req
, VIRTIO_BLK_S_UNSUPP
);
156 #endif /* __linux__ */
158 BlockErrorAction
drive_get_on_error(
159 BlockDriverState
*bdrv
, int is_read
)
165 QTAILQ_FOREACH(dinfo, &drives, next) {
166 if (dinfo->bdrv == bdrv)
167 return is_read ? dinfo->on_read_error : dinfo->on_write_error;
170 return is_read ? BLOCK_ERR_REPORT : BLOCK_ERR_STOP_ENOSPC;
172 return BLOCK_ERR_REPORT
;
/*
 * Stand-in for qemu's vm_stop(@reason), called when an I/O error policy
 * requests stopping the VM.
 *
 * NOTE(review): the body of this function was lost in extraction.  This
 * standalone tool has no VM to pause, so this is a no-op placeholder --
 * TODO: recover the original implementation.
 */
static void q_vm_stop(int reason)
{
    (void)reason;
}
181 static int qemu_vio_blk_handle_rw_error(VirtIOBlockReq
*req
, int error
,
185 BlockErrorAction action
=
186 drive_get_on_error(req
->dev
->bs
, is_read
);
187 VirtIOBlock
*s
= req
->dev
;
190 if (action
== BLOCK_ERR_IGNORE
)
193 if ((error
== ENOSPC
&& action
== BLOCK_ERR_STOP_ENOSPC
)
194 || action
== BLOCK_ERR_STOP_ANY
) {
200 qemu_vio_blk_req_complete(req
, VIRTIO_BLK_S_IOERR
);
208 static void qemu_vio_blk_rw_complete(void *opaque
, int ret
)
210 VirtIOBlockReq
*req
= opaque
;
211 int is_read
= !(req
->out
->type
& VIRTIO_BLK_T_OUT
);
216 multiwrite_async_ret
++;
219 if (qemu_vio_blk_handle_rw_error(req
, -ret
, is_read
))
223 qemu_vio_blk_req_complete(req
, VIRTIO_BLK_S_OK
);
226 void do_aio_multiwrite(BlockDriverState
*bs
, MultiReqBuffer
*mrb
)
231 if (!mrb
->num_writes
) {
236 ret
= bdrv_aio_multiwrite(bs
, mrb
->blkreq
, mrb
->num_writes
);
240 for (i
= 0; i
< mrb
->num_writes
; i
++) {
241 if (mrb
->blkreq
[i
].error
) {
242 qemu_vio_blk_rw_complete(mrb
->blkreq
[i
].opaque
, -EIO
);
248 while (multiwrite_async_ret
< mrb
->num_writes
)
252 multiwrite_async_ret
= 0;
257 void do_multiwrite(BlockDriverState
*bs
, BlockRequest
*blkreq
,
263 for (i
= 0; i
< num_writes
; i
++) {
265 ret
= bdrv_write(bs
,blkreq
[i
].sector
,blkreq
[i
].qiov
->iov
->iov_base
,blkreq
[i
].nb_sectors
);
266 blkreq
[i
].cb(blkreq
[i
].opaque
,ret
);
272 static void qemu_vio_blk_handle_write(VirtIOBlockReq
*req
, MultiReqBuffer
*mrb
)
274 BlockRequest
*blkreq
;
276 /* if (req->out->sector & req->dev->sector_mask) {
277 virtio_blk_rw_complete(req, -EIO);
281 if (mrb
->num_writes
== 32) {
282 async_context_push();
283 do_aio_multiwrite(req
->dev
->bs
, mrb
);
285 mrb
->old_bs
= req
->dev
->bs
;
288 blkreq
= &mrb
->blkreq
[mrb
->num_writes
];
289 blkreq
->sector
= req
->out
->sector
;
290 blkreq
->nb_sectors
= req
->qiov
.size
/ 512;
291 blkreq
->qiov
= &req
->qiov
;
292 blkreq
->cb
= qemu_vio_blk_rw_complete
;
293 blkreq
->opaque
= req
;
299 static void qemu_vio_blk_handle_read(VirtIOBlockReq
*req
)
301 BlockDriverAIOCB
*acb
;
304 req
->in
->status
= WAITING_ASYNC_RET
;
306 acb
= bdrv_aio_readv(req
->dev
->bs
, req
->out
->sector
, &req
->qiov
,
307 req
->qiov
.size
/ 512, qemu_vio_blk_rw_complete
, req
);
311 qemu_vio_blk_rw_complete(req
, -EIO
);
315 while (req
->in
->status
== WAITING_ASYNC_RET
)
322 static void qemu_vio_blk_handle_flush(BlockRequest
*blkreq
, int *num_writes
,
323 VirtIOBlockReq
*req
, BlockDriverState
**old_bs
)
325 BlockDriverAIOCB
*acb
;
328 * Make sure all outstanding writes are posted to the backing device.
330 if (*old_bs
!= NULL
) {
332 do_multiwrite(*old_bs
, blkreq
, *num_writes
);
335 *old_bs
= req
->dev
->bs
;
337 acb
= bdrv_aio_flush(req
->dev
->bs
, qemu_vio_blk_flush_complete
, req
);
339 qemu_vio_blk_req_complete(req
, VIRTIO_BLK_S_IOERR
);
343 VirtIOBlockReq
*virtio_blk_get_request(VirtIOBlock
*s
)
346 VirtIOBlockReq
*req
= malloc(sizeof(VirtIOBlockReq
));
350 bzero(req
,sizeof(VirtIOBlockReq
));
353 req
->dev
->bs
= s
->bs
;
356 if (!virtqueue_pop(s
->vq
, &req
->elem
)) {
366 void virtio_blk_handle_request(VirtIOBlockReq
*req
,
371 if (req
->elem
.out_num
< 1 || req
->elem
.in_num
< 1) {
372 fprintf(stderr
, "qemu-vio-blk: missing headers\n");
376 if (req
->elem
.out_sg
[0].iov_len
< sizeof(*req
->out
) ||
377 req
->elem
.in_sg
[req
->elem
.in_num
- 1].iov_len
< sizeof(*req
->in
)) {
378 fprintf(stderr
, "qemu-vio-blk: header not in correct element\n");
382 req
->out
= (void *)req
->elem
.out_sg
[0].iov_base
;
383 req
->in
= (void *)req
->elem
.in_sg
[req
->elem
.in_num
- 1].iov_base
;
385 if (req
->out
->type
& VIRTIO_BLK_T_FLUSH
) {
387 qemu_vio_blk_handle_flush(mrb
->blkreq
, &mrb
->num_writes
,
389 } else if (req
->out
->type
& VIRTIO_BLK_T_SCSI_CMD
) {
391 qemu_vio_blk_handle_scsi(req
);
392 } else if (req
->out
->type
& VIRTIO_BLK_T_OUT
) {
394 qemu_iovec_init_external(&req
->qiov
, &req
->elem
.out_sg
[1],
395 req
->elem
.out_num
- 1);
396 qemu_vio_blk_handle_write(req
, mrb
);
399 qemu_iovec_init_external(&req
->qiov
, &req
->elem
.in_sg
[0],
400 req
->elem
.in_num
- 1);
401 async_context_push();
402 qemu_vio_blk_handle_read(req
);
407 void qemu_vio_guess_geometry(BlockDriverState
*bs
, int *pcyls
, int *pheads
, int *psecs
)
409 bdrv_guess_geometry(bs
, pcyls
, pheads
, psecs
);
412 int64_t qemu_vio_getlength(BlockDriverState
*bs
)
414 return bdrv_getlength(bs
) / 512;