/* minix/lib/libvirtio/virtio.c */

/*
 * Generic virtio library for MINIX 3
 *
 * Copyright (c) 2013, A. Welzel, <arne.welzel@gmail.com>
 *
 * This software is released under the BSD license. See the LICENSE file
 * included in the main directory of this source distribution for the
 * license terms and conditions.
 */

#define _SYSTEM 1

#include <assert.h>
#include <errno.h>			/* for OK... */
#include <string.h>			/* memset() */
#include <stdlib.h>			/* malloc() */

#include <machine/pci.h>		/* PCI_ILR, PCI_BAR... */
#include <machine/vmparam.h>		/* PAGE_SIZE */

#include <minix/syslib.h>		/* umap, vumap, alloc_... */
#include <minix/sysutil.h>		/* panic(), at least */
#include <minix/virtio.h>		/* virtio system include */

#include "virtio_ring.h"		/* virtio types / helpers */

/*
 * About indirect descriptors:
 *
 * For each possible thread, a single indirect descriptor table is allocated.
 * If using direct descriptors would lead to a situation where another thread
 * might not be able to add another descriptor to the ring, indirect
 * descriptors are used.
 *
 * Indirect descriptor tables are pre-allocated, because each alloc_contig()
 * call involves a kernel call and is therefore expensive.
 *
 * The size of an indirect descriptor table is chosen based on MAPVEC_NR. A
 * driver using this library should never add more than
 *
 * MAPVEC_NR + MAPVEC_NR / 2
 *
 * descriptors to a queue, as this represents the maximum size of an indirect
 * descriptor table.
 */

struct indirect_desc_table {
	int in_use;
	struct vring_desc *descs;
	phys_bytes paddr;
	size_t len;
};

struct virtio_queue {

	void *vaddr;			/* virtual address of the ring */
	phys_bytes paddr;		/* physical address of the ring */
	u32_t page;			/* physical guest page of the ring */

	u16_t num;			/* number of descriptors */
	u32_t ring_size;		/* size of the ring in bytes */
	struct vring vring;

	u16_t free_num;			/* number of free descriptors */
	u16_t free_head;		/* next free descriptor */
	u16_t free_tail;		/* last free descriptor */
	u16_t last_used;		/* last used-ring entry we processed */

	void **data;			/* per-descriptor driver data pointers */
};

struct virtio_device {

	const char *name;		/* for debugging */

	u16_t port;			/* I/O port */

	struct virtio_feature *features;	/* host / guest features */
	u8_t num_features;		/* max 32 */

	struct virtio_queue *queues;	/* our queues */
	u16_t num_queues;

	int irq;			/* interrupt line */
	int irq_hook;			/* hook id */
	int msi;			/* is MSI enabled? */

	int threads;			/* max number of threads */

	struct indirect_desc_table *indirect;	/* indirect descriptor tables */
	int num_indirect;
};

static int is_matching_device(u16_t expected_sdid, u16_t vid, u16_t sdid);
static int init_device(int devind, struct virtio_device *dev);
static int init_phys_queues(struct virtio_device *dev);
static int exchange_features(struct virtio_device *dev);
static int alloc_phys_queue(struct virtio_queue *q);
static void free_phys_queue(struct virtio_queue *q);
static void init_phys_queue(struct virtio_queue *q);
static int init_indirect_desc_table(struct indirect_desc_table *desc);
static int init_indirect_desc_tables(struct virtio_device *dev);
static void virtio_irq_register(struct virtio_device *dev);
static void virtio_irq_unregister(struct virtio_device *dev);
static int wants_kick(struct virtio_queue *q);
static void kick_queue(struct virtio_device *dev, int qidx);

struct virtio_device *
virtio_setup_device(u16_t subdevid, const char *name,
		struct virtio_feature *features, int num_features,
		int threads, int skip)
{
	int r, devind;
	u16_t vid, did, sdid;
	struct virtio_device *ret;

	/* bogus values? */
	if (skip < 0 || name == NULL || num_features < 0 || threads <= 0)
		return NULL;

	pci_init();

	r = pci_first_dev(&devind, &vid, &did);

	while (r > 0) {
		sdid = pci_attr_r16(devind, PCI_SUBDID);

		if (is_matching_device(subdevid, vid, sdid)) {

			/* this is the device we are looking for */
			if (skip == 0)
				break;

			skip--;
		}

		r = pci_next_dev(&devind, &vid, &did);
	}

	/* pci_[first|next]_dev() return 0 if no device was found */
	if (r == 0 || skip > 0)
		return NULL;

	/* allocate and set known info about the device */
	ret = malloc(sizeof(*ret));

	if (ret == NULL)
		return NULL;

	/* Prepare the virtio_device instance */
	memset(ret, 0, sizeof(*ret));
	ret->name = name;
	ret->features = features;
	ret->num_features = num_features;
	ret->threads = threads;
	/* see the comment at the beginning of this file */
	ret->num_indirect = threads;

	if (init_device(devind, ret) != OK) {
		printf("%s: Could not initialize device\n", ret->name);
		goto err;
	}

	/* Ack the device */
	virtio_write8(ret, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_ACK);

	if (exchange_features(ret) != OK) {
		printf("%s: Could not exchange features\n", ret->name);
		goto err;
	}

	if (init_indirect_desc_tables(ret) != OK) {
		printf("%s: Could not initialize indirect tables\n", ret->name);
		goto err;
	}

	/* We know how to drive the device... */
	virtio_write8(ret, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_DRV);

	return ret;

/* Error path */
err:
	free(ret);
	return NULL;
}

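/*
 * For illustration, a minimal driver setup sequence might look like this
 * (sketch only; "feats" and the queue count are hypothetical and
 * device-dependent):
 *
 *	struct virtio_device *dev;
 *
 *	dev = virtio_setup_device(subdevid, "mydev", feats,
 *	    sizeof(feats) / sizeof(feats[0]), 1, 0);
 *	if (dev == NULL)
 *		return ENXIO;
 *
 *	if (virtio_alloc_queues(dev, 2) != OK)
 *		return ENOMEM;
 *
 *	virtio_device_ready(dev);
 */
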
static int
init_device(int devind, struct virtio_device *dev)
{
	u32_t base, size;
	int iof, r;

	pci_reserve(devind);

	if ((r = pci_get_bar(devind, PCI_BAR, &base, &size, &iof)) != OK) {
		printf("%s: Could not get BAR (%d)", dev->name, r);
		return r;
	}

	if (!iof) {
		printf("%s: PCI not IO space?", dev->name);
		return EINVAL;
	}

	if (base & 0xFFFF0000) {
		printf("%s: IO port weird (%08x)", dev->name, base);
		return EINVAL;
	}

	/* store the I/O port */
	dev->port = base;

	/* Reset the device */
	virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, 0);

	/* Read the IRQ line */
	dev->irq = pci_attr_r8(devind, PCI_ILR);

	return OK;
}

static int
exchange_features(struct virtio_device *dev)
{
	u32_t guest_features = 0, host_features = 0;
	struct virtio_feature *f;

	host_features = virtio_read32(dev, VIRTIO_HOST_F_OFF);

	for (int i = 0; i < dev->num_features; i++) {
		f = &dev->features[i];

		/* prepare the features the driver supports */
		guest_features |= (f->guest_support << f->bit);

		/* just load the host feature into the struct */
		f->host_support = ((host_features >> f->bit) & 1);
	}

	/* let the device know about our features */
	virtio_write32(dev, VIRTIO_GUEST_F_OFF, guest_features);

	return OK;
}

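/*
 * For illustration, a driver's feature table passed to virtio_setup_device()
 * might look like the following sketch (the names and bit numbers are
 * hypothetical; see struct virtio_feature in <minix/virtio.h> for the exact
 * field layout):
 *
 *	static struct virtio_feature feats[] = {
 *		{ "barrier",	0, 0, 0 },
 *		{ "segmax",	2, 0, 0 },
 *	};
 *
 * Each entry carries a feature bit plus a guest_support and a host_support
 * flag; exchange_features() advertises the guest flags to the device and
 * fills in the host flags from VIRTIO_HOST_F_OFF.
 */
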
int
virtio_alloc_queues(struct virtio_device *dev, int num_queues)
{
	int r = OK;

	assert(dev != NULL);

	/* Assume there's no device with more than 256 queues */
	if (num_queues < 0 || num_queues > 256)
		return EINVAL;

	dev->num_queues = num_queues;

	/* allocate queue memory */
	dev->queues = malloc(num_queues * sizeof(dev->queues[0]));

	if (dev->queues == NULL)
		return ENOMEM;

	memset(dev->queues, 0, num_queues * sizeof(dev->queues[0]));

	if ((r = init_phys_queues(dev)) != OK) {
		printf("%s: Could not initialize queues (%d)\n", dev->name, r);
		free(dev->queues);
		dev->queues = NULL;
	}

	return r;
}

static int
init_phys_queues(struct virtio_device *dev)
{
	/* Initialize all queues */
	int i, j, r;
	struct virtio_queue *q;

	for (i = 0; i < dev->num_queues; i++) {
		q = &dev->queues[i];
		/* select the queue */
		virtio_write16(dev, VIRTIO_QSEL_OFF, i);
		q->num = virtio_read16(dev, VIRTIO_QSIZE_OFF);

		/* queue sizes must be a power of two */
		if (q->num & (q->num - 1)) {
			printf("%s: Queue %d num=%d not ^2", dev->name, i,
			    q->num);
			r = EINVAL;
			goto free_phys_queues;
		}

		if ((r = alloc_phys_queue(q)) != OK)
			goto free_phys_queues;

		init_phys_queue(q);

		/* Let the host know about the guest physical page */
		virtio_write32(dev, VIRTIO_QADDR_OFF, q->page);
	}

	return OK;

/* Error path: free only the queues initialized so far */
free_phys_queues:
	for (j = 0; j < i; j++)
		free_phys_queue(&dev->queues[j]);

	return r;
}

static int
alloc_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);

	/* How much memory do we need? */
	q->ring_size = vring_size(q->num, PAGE_SIZE);

	q->vaddr = alloc_contig(q->ring_size, AC_ALIGN4K, &q->paddr);

	if (q->vaddr == NULL)
		return ENOMEM;

	q->data = alloc_contig(sizeof(q->data[0]) * q->num, AC_ALIGN4K, NULL);

	if (q->data == NULL) {
		free_contig(q->vaddr, q->ring_size);
		q->vaddr = NULL;
		q->paddr = 0;
		return ENOMEM;
	}

	return OK;
}

void
virtio_device_ready(struct virtio_device *dev)
{
	assert(dev != NULL);

	/* Register the IRQ line */
	virtio_irq_register(dev);

	/* Driver is ready to go! */
	virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, VIRTIO_STATUS_DRV_OK);
}

void
virtio_free_queues(struct virtio_device *dev)
{
	int i;

	assert(dev != NULL);
	assert(dev->queues != NULL);
	assert(dev->num_queues > 0);

	for (i = 0; i < dev->num_queues; i++)
		free_phys_queue(&dev->queues[i]);

	dev->num_queues = 0;

	/* dev->queues was allocated with malloc() in virtio_alloc_queues() */
	free(dev->queues);
	dev->queues = NULL;
}

static void
free_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);
	assert(q->vaddr != NULL);

	free_contig(q->vaddr, q->ring_size);
	q->vaddr = NULL;
	q->paddr = 0;

	/* free the data area with the size it was allocated with */
	free_contig(q->data, sizeof(q->data[0]) * q->num);
	q->data = NULL;
	q->num = 0;
}

static void
init_phys_queue(struct virtio_queue *q)
{
	memset(q->vaddr, 0, q->ring_size);
	memset(q->data, 0, sizeof(q->data[0]) * q->num);

	/* physical page in guest */
	q->page = q->paddr / PAGE_SIZE;

	/* Set the pointers in q->vring according to the ring size */
	vring_init(&q->vring, q->num, q->vaddr, PAGE_SIZE);

	/* Everything is free at this point; chain all descriptors */
	for (int i = 0; i < q->num; i++) {
		q->vring.desc[i].flags = VRING_DESC_F_NEXT;
		q->vring.desc[i].next = (i + 1) & (q->num - 1);
	}

	q->free_num = q->num;
	q->free_head = 0;
	q->free_tail = q->num - 1;
	q->last_used = 0;
}

void
virtio_free_device(struct virtio_device *dev)
{
	int i;
	struct indirect_desc_table *desc;

	assert(dev != NULL);
	assert(dev->num_indirect > 0);

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];
		free_contig(desc->descs, desc->len);
	}

	dev->num_indirect = 0;

	assert(dev->indirect != NULL);
	free(dev->indirect);
	dev->indirect = NULL;

	free(dev);
}

static int
init_indirect_desc_table(struct indirect_desc_table *desc)
{
	desc->in_use = 0;
	desc->len = (MAPVEC_NR + MAPVEC_NR / 2) * sizeof(struct vring_desc);

	desc->descs = alloc_contig(desc->len, AC_ALIGN4K, &desc->paddr);

	/* check the allocation before touching the memory */
	if (desc->descs == NULL)
		return ENOMEM;

	memset(desc->descs, 0, desc->len);

	return OK;
}

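/*
 * Sizing note: struct vring_desc is 16 bytes, so each pre-allocated table
 * occupies (MAPVEC_NR + MAPVEC_NR / 2) * 16 bytes. For example, if
 * MAPVEC_NR were 64 (the actual value comes from the system headers), a
 * table would hold 96 descriptors in 1536 bytes, i.e. less than a page.
 */
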
static int
init_indirect_desc_tables(struct virtio_device *dev)
{
	int i, j, r;
	struct indirect_desc_table *desc;

	dev->indirect = malloc(dev->num_indirect * sizeof(dev->indirect[0]));

	if (dev->indirect == NULL) {
		printf("%s: Could not allocate indirect tables\n", dev->name);
		return ENOMEM;
	}

	memset(dev->indirect, 0, dev->num_indirect * sizeof(dev->indirect[0]));

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];

		if ((r = init_indirect_desc_table(desc)) != OK) {

			/* error path: free the tables initialized so far */
			for (j = 0; j < i; j++) {
				desc = &dev->indirect[j];
				free_contig(desc->descs, desc->len);
			}

			free(dev->indirect);
			dev->indirect = NULL;

			return r;
		}
	}

	return OK;
}

static void
clear_indirect_table(struct virtio_device *dev, struct vring_desc *vd)
{
	int i;
	struct indirect_desc_table *desc;

	assert(vd->len > 0);
	assert(vd->flags & VRING_DESC_F_INDIRECT);

	vd->flags = vd->flags & ~VRING_DESC_F_INDIRECT;
	vd->len = 0;

	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];

		if (desc->paddr == vd->addr) {
			assert(desc->in_use);
			desc->in_use = 0;
			break;
		}
	}

	if (i >= dev->num_indirect)
		panic("Could not clear indirect descriptor table");
}

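/*
 * Fill a ring descriptor from a vumap_phys entry. By convention in this
 * library, bit 0 of vp_addr carries the direction: if it is set, the host
 * may write to the buffer (VRING_DESC_F_WRITE), and the real physical
 * address is vp_addr with bit 0 cleared, which implies buffers must be at
 * least two-byte aligned.
 */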
inline static void
use_vring_desc(struct vring_desc *vd, struct vumap_phys *vp)
{
	vd->addr = vp->vp_addr & ~1UL;
	vd->len = vp->vp_size;
	vd->flags = VRING_DESC_F_NEXT;

	if (vp->vp_addr & 1)
		vd->flags |= VRING_DESC_F_WRITE;
}

static void
set_indirect_descriptors(struct virtio_device *dev, struct virtio_queue *q,
	struct vumap_phys *bufs, size_t num)
{
	/* Indirect descriptor tables are simply filled from left to right */
	int i;
	struct indirect_desc_table *desc;
	struct vring *vring = &q->vring;
	struct vring_desc *vd, *ivd = NULL;

	if (0 == num)
		return;

	/* Find the first unused indirect descriptor table */
	for (i = 0; i < dev->num_indirect; i++) {
		desc = &dev->indirect[i];

		/* If an unused indirect descriptor table was found,
		 * mark it as being used and exit the loop.
		 */
		if (!desc->in_use) {
			desc->in_use = 1;
			break;
		}
	}

	/* Sanity check */
	if (i >= dev->num_indirect)
		panic("No indirect descriptor tables left");

	/* For an indirect descriptor table, only a single descriptor
	 * from the main ring is used.
	 */
	vd = &vring->desc[q->free_head];
	vd->flags = VRING_DESC_F_INDIRECT;
	vd->addr = desc->paddr;
	vd->len = num * sizeof(desc->descs[0]);

	/* Initialize the descriptors in the indirect descriptor table */
	for (i = 0; i < (int)num; i++) {
		ivd = &desc->descs[i];

		use_vring_desc(ivd, &bufs[i]);
		ivd->next = i + 1;
	}

	/* Unset the next bit of the last descriptor */
	if (NULL != ivd)
		ivd->flags = ivd->flags & ~VRING_DESC_F_NEXT;

	/* Update the queue; only a single descriptor was used */
	q->free_num -= 1;
	q->free_head = vd->next;
}

static void
set_direct_descriptors(struct virtio_queue *q, struct vumap_phys *bufs,
	size_t num)
{
	u16_t i;
	size_t count;
	struct vring *vring = &q->vring;
	struct vring_desc *vd;

	if (0 == num)
		return;

	for (i = q->free_head, count = 0; count < num; count++) {

		/* The next free descriptor */
		vd = &vring->desc[i];

		/* The descriptor is linked in the free list, so
		 * it always has the next bit set.
		 */
		assert(vd->flags & VRING_DESC_F_NEXT);

		use_vring_desc(vd, &bufs[count]);
		i = vd->next;
	}

	/* Unset the next bit of the last descriptor */
	vd->flags = vd->flags & ~VRING_DESC_F_NEXT;

	/* Update the queue */
	q->free_num -= num;
	q->free_head = i;
}

int
virtio_to_queue(struct virtio_device *dev, int qidx, struct vumap_phys *bufs,
	size_t num, void *data)
{
	u16_t free_first;
	int left;
	struct virtio_queue *q = &dev->queues[qidx];
	struct vring *vring = &q->vring;

	assert(0 <= qidx && qidx < dev->num_queues);

	if (!data)
		panic("%s: NULL data received queue %d", dev->name, qidx);

	free_first = q->free_head;

	left = (int)q->free_num - (int)num;

	if (left < dev->threads)
		set_indirect_descriptors(dev, q, bufs, num);
	else
		set_direct_descriptors(q, bufs, num);

	/* The next index for the host is the old free_head */
	vring->avail->ring[vring->avail->idx % q->num] = free_first;

	/* Provided by the caller to identify this slot */
	q->data[free_first] = data;

	/* Make sure the host sees the new descriptors */
	__insn_barrier();

	/* advance the available index */
	vring->avail->idx += 1;

	/* Make sure the host sees the new avail->idx */
	__insn_barrier();

	/* kick it! */
	kick_queue(dev, qidx);
	return 0;
}

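/*
 * For illustration, queueing a request with a read-only header and a
 * host-writable data buffer might look like this (sketch only; the buffer
 * names are hypothetical):
 *
 *	struct vumap_phys phys[2];
 *
 *	phys[0].vp_addr = hdr_paddr;		(read by the host)
 *	phys[0].vp_size = sizeof(*hdr);
 *	phys[1].vp_addr = buf_paddr | 1;	(written by the host)
 *	phys[1].vp_size = buf_size;
 *
 *	virtio_to_queue(dev, 0, phys, 2, req);
 */
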
int
virtio_from_queue(struct virtio_device *dev, int qidx, void **data,
	size_t *len)
{
	struct virtio_queue *q;
	struct vring *vring;
	struct vring_used_elem *uel;
	struct vring_desc *vd;
	int count = 0;
	u16_t idx;
	u16_t used_idx;

	assert(0 <= qidx && qidx < dev->num_queues);

	q = &dev->queues[qidx];
	vring = &q->vring;

	/* Make sure we see the changes done by the host */
	__insn_barrier();

	/* The index from the host */
	used_idx = vring->used->idx % q->num;

	/* We already saw this one, nothing to do here */
	if (q->last_used == used_idx)
		return -1;

	/* Get the vring_used element */
	uel = &q->vring.used->ring[q->last_used];

	/* Update the last used element */
	q->last_used = (q->last_used + 1) % q->num;

	/* index of the used element */
	idx = uel->id % q->num;

	assert(q->data[idx] != NULL);

	/* Get the descriptor */
	vd = &vring->desc[idx];

	/* Unconditionally set the tail->next to the first used one */
	assert(vring->desc[q->free_tail].flags & VRING_DESC_F_NEXT);
	vring->desc[q->free_tail].next = idx;

	/* Find the last index; eventually there has to be one
	 * without the next flag.
	 *
	 * FIXME: Protect against an endless loop.
	 */
	while (vd->flags & VRING_DESC_F_NEXT) {

		if (vd->flags & VRING_DESC_F_INDIRECT)
			clear_indirect_table(dev, vd);

		idx = vd->next;
		vd = &vring->desc[idx];
		count++;
	}

	/* We didn't count the last one */
	count++;

	if (vd->flags & VRING_DESC_F_INDIRECT)
		clear_indirect_table(dev, vd);

	/* idx points to the tail now; update the queue */
	q->free_tail = idx;
	assert(!(vd->flags & VRING_DESC_F_NEXT));

	/* We can always connect the tail with the head */
	vring->desc[q->free_tail].next = q->free_head;
	vring->desc[q->free_tail].flags = VRING_DESC_F_NEXT;

	q->free_num += count;

	assert(q->free_num <= q->num);

	*data = q->data[uel->id];
	q->data[uel->id] = NULL;

	if (len != NULL)
		*len = uel->len;

	return 0;
}

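/*
 * For illustration, an interrupt handler would typically drain the used
 * ring until virtio_from_queue() returns -1 (sketch only; finish_request()
 * is a hypothetical driver function):
 *
 *	void *data;
 *	size_t len;
 *
 *	if (virtio_had_irq(dev)) {
 *		while (virtio_from_queue(dev, 0, &data, &len) == 0)
 *			finish_request(data, len);
 *	}
 */
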
int
virtio_had_irq(struct virtio_device *dev)
{
	return virtio_read8(dev, VIRTIO_ISR_STATUS_OFF) & 1;
}

void
virtio_reset_device(struct virtio_device *dev)
{
	virtio_irq_unregister(dev);
	virtio_write8(dev, VIRTIO_DEV_STATUS_OFF, 0);
}

void
virtio_irq_enable(struct virtio_device *dev)
{
	int r;

	if ((r = sys_irqenable(&dev->irq_hook)) != OK)
		panic("%s: Unable to enable IRQ %d", dev->name, r);
}

void
virtio_irq_disable(struct virtio_device *dev)
{
	int r;

	if ((r = sys_irqdisable(&dev->irq_hook)) != OK)
		panic("%s: Unable to disable IRQ %d", dev->name, r);
}

static int
wants_kick(struct virtio_queue *q)
{
	assert(q != NULL);
	return !(q->vring.used->flags & VRING_USED_F_NO_NOTIFY);
}

static void
kick_queue(struct virtio_device *dev, int qidx)
{
	assert(0 <= qidx && qidx < dev->num_queues);

	if (wants_kick(&dev->queues[qidx]))
		virtio_write16(dev, VIRTIO_QNOTFIY_OFF, qidx);
}

static int
is_matching_device(u16_t expected_sdid, u16_t vid, u16_t sdid)
{
	return vid == VIRTIO_VENDOR_ID && sdid == expected_sdid;
}

static void
virtio_irq_register(struct virtio_device *dev)
{
	int r;

	if ((r = sys_irqsetpolicy(dev->irq, 0, &dev->irq_hook)) != OK)
		panic("%s: Unable to register IRQ %d", dev->name, r);
}

static void
virtio_irq_unregister(struct virtio_device *dev)
{
	int r;

	if ((r = sys_irqrmpolicy(&dev->irq_hook)) != OK)
		panic("%s: Unable to unregister IRQ %d", dev->name, r);
}

static int
_supports(struct virtio_device *dev, int bit, int host)
{
	for (int i = 0; i < dev->num_features; i++) {
		struct virtio_feature *f = &dev->features[i];

		if (f->bit == bit)
			return host ? f->host_support : f->guest_support;
	}

	panic("%s: Feature not found bit=%d", dev->name, bit);
}

int
virtio_host_supports(struct virtio_device *dev, int bit)
{
	return _supports(dev, bit, 1);
}

int
virtio_guest_supports(struct virtio_device *dev, int bit)
{
	return _supports(dev, bit, 0);
}

/* Just some wrappers around sys_in*() */
#define VIRTIO_READ_XX(xx, suff)					\
u##xx##_t								\
virtio_read##xx(struct virtio_device *dev, i32_t off)			\
{									\
	int r;								\
	u32_t ret;							\
	if ((r = sys_in##suff(dev->port + off, &ret)) != OK)		\
		panic("%s: Read failed %d %d r=%d", dev->name,		\
				dev->port,				\
				off,					\
				r);					\
									\
	return ret;							\
}

VIRTIO_READ_XX(32, l)
VIRTIO_READ_XX(16, w)
VIRTIO_READ_XX(8, b)

/* Just some wrappers around sys_out*() */
#define VIRTIO_WRITE_XX(xx, suff)					\
void									\
virtio_write##xx(struct virtio_device *dev, i32_t off, u##xx##_t val)	\
{									\
	int r;								\
	if ((r = sys_out##suff(dev->port + off, val)) != OK)		\
		panic("%s: Write failed %d %d r=%d", dev->name,		\
				dev->port,				\
				off,					\
				r);					\
}

VIRTIO_WRITE_XX(32, l)
VIRTIO_WRITE_XX(16, w)
VIRTIO_WRITE_XX(8, b)

/* Reads from the device-specific configuration space; the offset is
 * adjusted by VIRTIO_DEV_SPECIFIC_OFF (plus the MSI offset, if enabled).
 */
#define VIRTIO_SREAD_XX(xx, suff)					\
u##xx##_t								\
virtio_sread##xx(struct virtio_device *dev, i32_t off)			\
{									\
	int r;								\
	u32_t ret;							\
	off += VIRTIO_DEV_SPECIFIC_OFF;					\
									\
	if (dev->msi)							\
		off += VIRTIO_MSI_ADD_OFF;				\
									\
	if ((r = sys_in##suff(dev->port + off, &ret)) != OK)		\
		panic("%s: Read failed %d %d r=%d", dev->name,		\
				dev->port,				\
				off,					\
				r);					\
									\
	return ret;							\
}

VIRTIO_SREAD_XX(32, l)
VIRTIO_SREAD_XX(16, w)
VIRTIO_SREAD_XX(8, b)

/* Writes to the device-specific configuration space; the offset is
 * adjusted by VIRTIO_DEV_SPECIFIC_OFF (plus the MSI offset, if enabled).
 */
#define VIRTIO_SWRITE_XX(xx, suff)					\
void									\
virtio_swrite##xx(struct virtio_device *dev, i32_t off, u##xx##_t val)	\
{									\
	int r;								\
	off += VIRTIO_DEV_SPECIFIC_OFF;					\
									\
	if (dev->msi)							\
		off += VIRTIO_MSI_ADD_OFF;				\
									\
	if ((r = sys_out##suff(dev->port + off, val)) != OK)		\
		panic("%s: Write failed %d %d r=%d", dev->name,		\
				dev->port,				\
				off,					\
				r);					\
}

VIRTIO_SWRITE_XX(32, l)
VIRTIO_SWRITE_XX(16, w)
VIRTIO_SWRITE_XX(8, b)