/*
 * Copyright (c) 2018 Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "hw/xen/xen.h"
#include "hw/block/xen_blkif.h"
#include "hw/xen/interface/io/ring.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"
typedef struct XenBlockRequest {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    XenBlockDataPlane *dataplane;
    QLIST_ENTRY(XenBlockRequest) list;
    BlockAcctCookie acct;
} XenBlockRequest;
struct XenBlockDataPlane {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
    QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
    int requests_total;
    int requests_inflight;
    unsigned int max_requests;
    BlockBackend *blk;
    unsigned int sector_size;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};
static int xen_block_send_response(XenBlockRequest *request);
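
/* Return a request to a pristine state, ready for reuse from the freelist. */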
static void reset_request(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}
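
/*
 * Take a request from the freelist, or allocate a new one (bounded by
 * max_requests) with a page-aligned bounce buffer large enough for
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments, and put it on the inflight list.
 */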
static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request = NULL;

    if (QLIST_EMPTY(&dataplane->freelist)) {
        if (dataplane->requests_total >= dataplane->max_requests) {
            return NULL;
        }
        /* allocate new struct */
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        /*
         * We cannot need more pages per request than this, and since we
         * re-use requests, allocate the memory once here. It will be freed
         * in xen_block_dataplane_destroy() when the request list is freed.
         */
        request->buf = qemu_memalign(XEN_PAGE_SIZE,
                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
                                     XEN_PAGE_SIZE);
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        /* get one from freelist */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    }

    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;

    return request;
}
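
/*
 * Queue a response for the request, notify the frontend via the event
 * channel if needed, and recycle the request onto the freelist.
 */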
static void xen_block_complete_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (xen_block_send_response(request)) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }

    QLIST_REMOVE(request, list);
    dataplane->requests_inflight--;
    reset_request(request);
    request->dataplane = dataplane;
    QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
}
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int xen_block_parse_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    }

    if (request->req.operation != BLKIF_OP_READ &&
        !blk_is_writable(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * dataplane->sector_size;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * dataplane->sector_size >=
            XEN_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * dataplane->sector_size;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}
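
/*
 * Copy data between the request's bounce buffer and the guest's grant
 * references; the direction depends on whether this is a read (copy to
 * the domain) or a write/flush (copy from the domain).
 */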
static int xen_block_copy_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    XenDevice *xendev = dataplane->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    bool to_domain = (request->req.operation == BLKIF_OP_READ);
    void *virt = request->buf;
    Error *local_err = NULL;

    if (request->req.nr_segments == 0) {
        return 0;
    }

    count = request->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) *
                      dataplane->sector_size;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        request->aio_errors++;
        return -1;
    }

    return 0;
}
static int xen_block_do_aio(XenBlockRequest *request);
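
/*
 * AIO completion callback: account errors, re-submit the request after a
 * presync flush, copy read data back to the guest once all in-flight AIO
 * for the request has finished, update accounting and complete the request.
 */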
static void xen_block_complete_aio(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        return;
    }
    if (request->aio_inflight > 0) {
        return;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            xen_block_copy_request(request);
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }

    xen_block_complete_request(request);

    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }
}
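
/*
 * A discard may cover more bytes than a single block-layer request is
 * allowed to carry, so split it into chunks of at most
 * BDRV_REQUEST_MAX_BYTES and submit them individually.
 */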
static bool xen_block_split_discard(XenBlockRequest *request,
                                    blkif_sector_t sector_number,
                                    uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / dataplane->sector_size) {
        return false;
    }

    byte_offset = sec_start * dataplane->sector_size;
    byte_remaining = sec_count * dataplane->sector_size;

    do {
        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
            BDRV_REQUEST_MAX_BYTES : byte_remaining;
        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                         xen_block_complete_aio, request);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}
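
/* Submit the parsed request to the BlockBackend as asynchronous I/O. */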
static int xen_block_do_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        xen_block_copy_request(request)) {
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
                      request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       xen_block_complete_aio, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        xen_block_complete_aio, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;

        if (!xen_block_split_discard(request, req->sector_number,
                                     req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    xen_block_complete_aio(request, 0);

    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    xen_block_complete_request(request);
    return -1;
}
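
/*
 * Place a response on the shared ring in the layout expected by the
 * frontend's protocol. Returns non-zero if the frontend needs to be
 * notified, and flags more work if further requests are already pending.
 */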
static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }

    return send_notify;
}
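
/*
 * Fetch a request from the shared ring and convert it, if necessary,
 * from the 32-bit or 64-bit x86 layout into the native blkif layout.
 */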
static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}
/*
 * Threshold of in-flight requests above which we will start using
 * defer_call_begin()/defer_call_end() to batch requests.
 */
#define IO_PLUG_THRESHOLD 1
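
/*
 * Main request-processing loop: pull requests off the ring, parse and
 * submit them, batching submissions with defer_call_begin()/end() when
 * the backend is already busy. Returns true if any request was consumed.
 */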
static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;
    bool done_something = false;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        defer_call_begin();
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;
        done_something = true;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            }

            xen_block_complete_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            defer_call_end();
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                defer_call_begin();
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        defer_call_end();
    }

    return done_something;
}
static void xen_block_dataplane_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    xen_block_handle_requests(dataplane);
}

static bool xen_block_dataplane_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    return xen_block_handle_requests(dataplane);
}
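
/*
 * Create the dataplane state and its bottom half in the IOThread's
 * AioContext if one was given, otherwise in the main loop context.
 */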
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = blk;
    dataplane->sector_size = sector_size;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
    dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh,
                                       dataplane,
                                       &DEVICE(xendev)->mem_reentrancy_guard);

    return dataplane;
}
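
/*
 * Free every request on the freelist together with its bounce buffer and
 * iovec, then release the bottom half, the IOThread reference and the
 * dataplane itself.
 */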
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        qemu_vfree(request->buf);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}
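
/*
 * Detach/attach switch event-channel handling between the main loop and
 * the dataplane's AioContext.
 */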
void xen_block_dataplane_detach(XenBlockDataPlane *dataplane)
{
    if (!dataplane || !dataplane->event_channel) {
        return;
    }

    /* Only reason for failure is a NULL channel */
    xen_device_set_event_channel_context(dataplane->xendev,
                                         dataplane->event_channel,
                                         NULL, &error_abort);
}

void xen_block_dataplane_attach(XenBlockDataPlane *dataplane)
{
    if (!dataplane || !dataplane->event_channel) {
        return;
    }

    /* Only reason for failure is a NULL channel */
    xen_device_set_event_channel_context(dataplane->xendev,
                                         dataplane->event_channel,
                                         dataplane->ctx, &error_abort);
}
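
/*
 * Quiesce the dataplane: move the BlockBackend back to the main loop,
 * cancel the bottom half, unbind the event channel and unmap the ring.
 */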
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    xendev = dataplane->xendev;

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_detach(dataplane);
    }

    /* Xen doesn't have multiple users for nodes, so this can't fail */
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);

    /*
     * Now that the context has been moved onto the main thread, cancel
     * further processing.
     */
    qemu_bh_cancel(dataplane->bh);

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->ring_ref,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}
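
/*
 * Map the shared ring(s), bind the event channel and move the BlockBackend
 * into the dataplane's AioContext. On failure, any partially initialised
 * state is torn down via xen_block_dataplane_stop().
 */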
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    ERRP_GUARD();
    XenDevice *xendev = dataplane->xendev;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  errp);
    if (*errp) {
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                                 dataplane->ring_ref,
                                                 dataplane->nr_ring_ref,
                                                 PROT_READ | PROT_WRITE,
                                                 errp);
    if (*errp) {
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      errp);
    if (*errp) {
        goto stop;
    }

    /* If other users keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);

    if (!blk_in_drain(dataplane->blk)) {
        xen_block_dataplane_attach(dataplane);
    }

    return;

stop:
    xen_block_dataplane_stop(dataplane);
}
);