Added cache configuration to command line options.
[qemu/ovp.git] / qemu-vio-virtio.c
#include "qemu-vio-virtio.h"
#include <linux/types.h>
#include <string.h>
#include <stdio.h>
#include <linux/virtio_vio.h>
#include <linux/virtio_ring.h>

extern target_phys_addr_t data_buffaddr;

//#define trace() printf("QEMU-VIO-VIRTIO: %s-%s: %s(%d)\n", __TIME__, __FILE__, __func__, __LINE__)
#define trace() ;

#define printk printf
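
/*
 * NOTE: VRingDesc/VRingAvail/VRingUsed are defined in the headers above;
 * the sketch below is inferred from the offsetof()/ldX_phys() accessors in
 * this file and is illustrative only (field widths follow the load sizes):
 *
 *     struct VRingDesc {                 // one guest descriptor
 *         uint64_t addr;                 // read with ldq_phys()
 *         uint32_t offset;               // offset into the shared data buffer
 *         uint32_t poolidx;
 *         uint32_t len;
 *         uint16_t flags;                // VRING_DESC_F_NEXT/WRITE/INDIRECT
 *         uint16_t next;                 // next descriptor in the chain
 *     };
 *     struct VRingAvail { uint16_t flags, idx; uint16_t ring[]; };
 *     struct VRingUsed  { uint16_t flags, idx;
 *                         struct { uint32_t id, len; } ring[]; };
 */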
static inline target_phys_addr_t vring_align(target_phys_addr_t addr,
                                             unsigned long align)
{
    return (addr + align - 1) & ~(align - 1);
}
static inline uint64_t vring_desc_addr(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    /* The original loaded the address twice into an unused temporary;
     * a single ldq_phys() is sufficient. */
    return ldq_phys(pa);
}
static inline uint32_t vring_desc_offset(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, offset);
    return ldl_phys(pa);
}

static inline uint32_t vring_desc_poolidx(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, poolidx);
    return ldl_phys(pa);
}

static inline uint32_t vring_desc_len(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(target_phys_addr_t desc_pa, int i)
{
    target_phys_addr_t pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    target_phys_addr_t pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}
static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_increment(VirtQueue *vq, uint16_t val)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, vring_used_idx(vq) + val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    target_phys_addr_t pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}
static inline void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable)
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    else
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
}
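
/*
 * Usage sketch (hypothetical, not part of this file): a device that is
 * already draining the ring can suppress further guest kicks, then
 * re-enable them before going idle:
 *
 *     virtio_queue_set_notification(vq, 0);   // polling, no kicks needed
 *     while (virtqueue_pop(vq, &elem) > 0)
 *         ...;
 *     virtio_queue_set_notification(vq, 1);   // sleep until the next kick
 */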
static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    // Grab the next descriptor number they're advertising, and increment
    // the index we've seen.
    head = vring_avail_ring(vq, idx % vq->vring.num);

    // If their number is silly, that's a fatal mistake.
    if (head >= vq->vring.num) {
        fprintf(stderr, "Guest says index %u is available\n", head);
        return -1;
    }

    return head;
}
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    /* uint16_t arithmetic wraps naturally, so this stays correct when the
     * guest's avail index rolls over. */
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    // Check it isn't doing very strange things with descriptor numbers.
    if (num_heads > vq->vring.num) {
        fprintf(stderr, "Guest moved used index from %u to %u\n",
                idx, vring_avail_idx(vq));
        return -1;
    }

    return num_heads;
}
static unsigned virtqueue_next_desc(target_phys_addr_t desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    // If this descriptor says it doesn't chain, we're done.
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT))
        return max;

    // Check they're not leading us off the end of the descriptors.
    next = vring_desc_next(desc_pa, i);
    // Make sure compiler knows to grab that: we don't want it changing!
    //wmb();

    if (next >= max) {
        fprintf(stderr, "Desc next is %u\n", next);
        return -1;
    }

    return next;
}
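
/*
 * Chain-walk pattern (this is how virtqueue_pop() below consumes a chain);
 * the max return value doubles as the end-of-chain sentinel:
 *
 *     i = head;
 *     do {
 *         ... consume descriptor i ...
 *     } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);
 */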
static void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                           unsigned int len, unsigned int idx)
{
    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Write the element into the next entry of the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}
static void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    /* Make sure buffer is written before we update index. */
    //wmb();

    vring_used_idx_increment(vq, count);
    vq->inuse -= count;
}
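
/*
 * fill() and flush() are split so completions can be batched: write several
 * used-ring entries at increasing idx offsets, then publish them all to the
 * guest with one index update. Hypothetical sketch:
 *
 *     virtqueue_fill(vq, &elems[0], len0, 0);
 *     virtqueue_fill(vq, &elems[1], len1, 1);
 *     virtqueue_flush(vq, 2);   // guest observes both entries at once
 */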
void virtqueue_init(VirtQueue *vq)
{
    target_phys_addr_t pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 VIRTIO_PCI_VRING_ALIGN);
}
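
/*
 * Worked example of the layout above for num = 256 and a page-aligned pa,
 * assuming the sketched 24-byte VRingDesc and QEMU's stock
 * VIRTIO_PCI_VRING_ALIGN of 4096:
 *
 *     desc:  pa                  (256 * 24 = 6144 bytes)
 *     avail: pa + 6144           (4 + 256 * 2 = 516 bytes)
 *     used:  vring_align(pa + 6660, 4096) = pa + 8192
 */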
int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    target_phys_addr_t desc_pa = vq->vring.desc;
    target_phys_addr_t len;

    /* Treat an error return (-1) the same as an empty ring. */
    if (virtqueue_num_heads(vq, vq->last_avail_idx) <= 0) {
        return 0;
    }

    // When we start there are no input or output buffers.
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            fprintf(stderr, "Invalid size for indirect buffer table\n");
            return -1;
        }

        // Loop over the indirect descriptor table instead.
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    do {
        struct myiovec *sg;
        int is_write = 0;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = (struct myiovec *)&elem->in_sg[elem->in_num++];
            is_write = 1;
        } else
            sg = (struct myiovec *)&elem->out_sg[elem->out_num++];

        // Grab the descriptor, and check it's OK.
        sg->iov_len = vring_desc_len(desc_pa, i);
        len = sg->iov_len;
        // The buffer lives at a fixed offset inside the shared data buffer.
        sg->iov_base = (void *)(vring_desc_offset(desc_pa, i) + (char *)data_buffaddr);

        if (sg->iov_base == NULL || len != sg->iov_len) {
            fprintf(stderr, "qemu-vio-virtio: trying to map MMIO memory\n");
            return -1;
        }

        // If we've got too many, that implies a descriptor loop.
        if ((elem->in_num + elem->out_num) > max) {
            fprintf(stderr, "Looped descriptor\n");
            return -1;
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    elem->index = head;

    vq->inuse++;

    return elem->in_num + elem->out_num;
}
void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
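
/*
 * Typical device-side loop tying pop/push/notify together (hypothetical
 * sketch; handle_request() is a placeholder, not part of this file):
 *
 *     VirtQueueElement elem;
 *     while (virtqueue_pop(vq, &elem) > 0) {
 *         int written = handle_request(&elem);  // fill elem.in_sg buffers
 *         virtqueue_push(vq, &elem, written);   // hand the buffers back
 *     }
 *     virtio_notify(vq);                        // raise the guest interrupt
 */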
void virtio_notify(VirtQueue *vq)