/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */
#include "dma.h"
#include "block_int.h"
#include "cache-utils.h"
static AIOPool dma_aio_pool;
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}
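
/*
 * Example (a sketch, not part of this file): a device emulation that has
 * gathered guest-physical (base, len) pairs from its descriptor ring could
 * build and tear down a scatter-gather list like this.  "desc", "nb_desc"
 * and the field names are made up for illustration.
 *
 *     QEMUSGList qsg;
 *     int i;
 *
 *     qemu_sglist_init(&qsg, nb_desc);
 *     for (i = 0; i < nb_desc; i++) {
 *         qemu_sglist_add(&qsg, desc[i].addr, desc[i].len);
 *     }
 *     ... pass &qsg to dma_bdrv_read()/dma_bdrv_write() ...
 *     qemu_sglist_destroy(&qsg);
 */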
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
} DMAAIOCB;
static void dma_bdrv_cb(void *opaque, int ret);
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry once mapping resources are free. */
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
    }
}
static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    QEMUIOVector *qiov;
    int i;
    DMAAIOCB *dbs = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);

    /* Keep instruction and data caches coherent for the mapped buffers. */
    qiov = &dbs->iov;
    for (i = 0; i < qiov->niov; ++i) {
        qemu_sync_idcache((unsigned long)qiov->iov[i].iov_base,
            (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
    }

    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}
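
/*
 * Example (a sketch, not part of this file): a block device model would
 * typically start a scatter-gather transfer and finish its guest request
 * from the completion callback.  "MyDeviceState", "my_dma_complete" and
 * "finish_request" are hypothetical names; the callback signature matches
 * the one expected by dma_bdrv_read()/dma_bdrv_write().
 *
 *     static void my_dma_complete(void *opaque, int ret)
 *     {
 *         MyDeviceState *s = opaque;
 *
 *         qemu_sglist_destroy(&s->sg);
 *         finish_request(s, ret);
 *     }
 *
 *     s->acb = dma_bdrv_read(s->bs, &s->sg, sector_num,
 *                            my_dma_complete, s);
 */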
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}
void dma_helper_init(void)
{
    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
}
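
/*
 * Note (assumption based on how the pool is used above): dma_helper_init()
 * only registers the DMAAIOCB pool with its cancel handler, so it is meant
 * to be called once during startup, before the first dma_bdrv_read() or
 * dma_bdrv_write() request is issued.
 */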