#ifndef QEMU_HW_XEN_COMMON_H
#define QEMU_HW_XEN_COMMON_H

/*
 * If we have new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user supplied cflags might say. They
 * must be undefined before including xenctrl.h.
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
#include <xenstore.h>
#include <xen/io/xenbus.h>
18 #include "hw/xen/xen.h"
19 #include "hw/pci/pci.h"
20 #include "qemu/queue.h"
21 #include "hw/xen/trace.h"
23 extern xc_interface
*xen_xc
;

/*
 * We don't support Xen prior to 4.2.0.
 */

/* Xen 4.2 through 4.6 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40701

typedef xc_interface xenforeignmemory_handle;
typedef xc_evtchn xenevtchn_handle;
typedef xc_gnttab xengnttab_handle;

#define xenevtchn_open(l, f) xc_evtchn_open(l, f)
#define xenevtchn_close(h) xc_evtchn_close(h)
#define xenevtchn_fd(h) xc_evtchn_fd(h)
#define xenevtchn_pending(h) xc_evtchn_pending(h)
#define xenevtchn_notify(h, p) xc_evtchn_notify(h, p)
#define xenevtchn_bind_interdomain(h, d, p) xc_evtchn_bind_interdomain(h, d, p)
#define xenevtchn_unmask(h, p) xc_evtchn_unmask(h, p)
#define xenevtchn_unbind(h, p) xc_evtchn_unbind(h, p)

#define xengnttab_open(l, f) xc_gnttab_open(l, f)
#define xengnttab_close(h) xc_gnttab_close(h)
#define xengnttab_set_max_grants(h, n) xc_gnttab_set_max_grants(h, n)
#define xengnttab_map_grant_ref(h, d, r, p) xc_gnttab_map_grant_ref(h, d, r, p)
#define xengnttab_unmap(h, a, n) xc_gnttab_munmap(h, a, n)
#define xengnttab_map_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_grant_refs(h, c, d, r, p)
#define xengnttab_map_domain_grant_refs(h, c, d, r, p) \
    xc_gnttab_map_domain_grant_refs(h, c, d, r, p)

#define xenforeignmemory_open(l, f) xen_xc
#define xenforeignmemory_close(h)

static inline void *xenforeignmemory_map(xc_interface *h, uint32_t dom,
                                         int prot, size_t pages,
                                         const xen_pfn_t arr[/*pages*/],
                                         int err[/*pages*/])
{
    if (err) {
        return xc_map_foreign_bulk(h, dom, prot, arr, err, pages);
    } else {
        return xc_map_foreign_pages(h, dom, prot, arr, pages);
    }
}

#define xenforeignmemory_unmap(h, p, s) munmap(p, s * XC_PAGE_SIZE)
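
/*
 * Usage sketch (hedged, illustrative only; not code QEMU itself contains):
 * the compat wrappers above pair up like the real libxenforeignmemory
 * calls, and unmap on this path is plain munmap() scaled by XC_PAGE_SIZE.
 * "domid" and "gfn" below are assumed placeholders.
 *
 *     xen_pfn_t pfns[1] = { gfn };
 *     int errs[1];
 *     void *va = xenforeignmemory_map(xen_xc, domid, PROT_READ | PROT_WRITE,
 *                                     1, pfns, errs);
 *     if (va && !errs[0]) {
 *         ... access the mapped page ...
 *         xenforeignmemory_unmap(xen_xc, va, 1);
 *     }
 */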

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40701 */

#include <xenevtchn.h>
#include <xengnttab.h>
#include <xenforeignmemory.h>

#endif

extern xenforeignmemory_handle *xen_fmem;

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100

/* Fallback for Xen < 4.11: relocate guest frames one at a time. */
static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t i;
    int rc;

    for (i = 0; i < size; i++) {
        unsigned long idx = src_gfn + i;
        xen_pfn_t gpfn = dst_gfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
                                      gpfn);
        if (rc) {
            return rc;
        }
    }

    return 0;
}

static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}

typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

/* Resource mapping is not available before Xen 4.11. */
static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
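
/*
 * Usage sketch (hedged, illustrative only): on this pre-4.11 path the stub
 * above always fails, so a caller probing for mapped ioreq-server resources
 * would be expected to fall back to the PFN-based setup, e.g.
 *
 *     void *addr = NULL;
 *     xenforeignmemory_resource_handle *res =
 *         xenforeignmemory_map_resource(xen_fmem, domid,
 *                                       XENMEM_resource_ioreq_server, id,
 *                                       0, 1, &addr,
 *                                       PROT_READ | PROT_WRITE, 0);
 *     if (!res) {
 *         // errno is EOPNOTSUPP here; use xen_get_ioreq_server_info() instead
 *     }
 *
 * "domid" and "id" are assumed placeholders.
 */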

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

/* Not available before Xen 4.10. */
static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

/* Before Xen 4.9 device model operations go through libxenctrl, so reuse
 * the existing xen_xc handle. */
static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}

static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif

extern xendevicemodel_handle *xen_dmod;

static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_set_pci_link_route(domid_t domid, uint8_t link,
                                         uint8_t irq)
{
    return xendevicemodel_set_pci_link_route(xen_dmod, domid, link, irq);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}
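
/*
 * Usage sketch (hedged, illustrative only; not code QEMU itself contains):
 * a device model built on the wrappers above would typically pulse a guest
 * interrupt or inject an MSI like this, where "domid", "irq", "addr" and
 * "data" are assumed placeholders:
 *
 *     xen_set_isa_irq_level(domid, irq, 1);    assert the line
 *     xen_set_isa_irq_level(domid, irq, 0);    then deassert it
 *
 *     xen_inject_msi(domid, addr, data);       edge-style MSI delivery
 */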

static inline int xen_restrict(domid_t domid)
{
    int rc;

    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);

    return rc;
}

void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);

#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;

    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40600
#ifndef HVM_IOREQSRV_BUFIOREQ_ATOMIC
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
#endif
#endif

static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                    xen_pfn_t *ioreq_pfn,
                                                    xen_pfn_t *bufioreq_pfn,
                                                    evtchn_port_t
                                                        *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40500

#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

#define IOREQ_TYPE_PCI_CONFIG 2

typedef uint16_t ioservid_t;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
}

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                             bufioreq_pfn,
                                             bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    return 0;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40500 */

static bool use_default_ioreq_server;

static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}

static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}

static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}

static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}

static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}

static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}

static inline void xen_create_ioreq_server(domid_t dom,
                                           ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();
}
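
/*
 * Note (hedged): the create/destroy pair above is expected to bracket the
 * lifetime of the emulator's ioreq server; when creation fails we fall back
 * to the default ioreq server, and the remaining xen_*_ioreq_server helpers
 * either become no-ops or use the default-server information, e.g.
 *
 *     ioservid_t ioservid;
 *     xen_create_ioreq_server(domid, &ioservid);
 *     ...
 *     xen_set_ioreq_server_state(domid, ioservid, true);
 *     ...
 *     xen_destroy_ioreq_server(domid, ioservid);
 *
 * "domid" is an assumed placeholder.
 */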

static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}

static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}

static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}

#endif

#ifdef CONFIG_XEN_PV_DOMAIN_BUILD
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40700
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid);
}
#else
/* Newer libxenctrl takes an extra domain configuration argument, which we
 * leave as NULL. */
static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref,
                                    xen_domain_handle_t handle, uint32_t flags,
                                    uint32_t *pdomid)
{
    return xc_domain_create(xc, ssidref, handle, flags, pdomid, NULL);
}
#endif
#endif

#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40800

/* Layout mirrors xengnttab_grant_copy_segment_t from libxengnttab 4.8+. */
struct xengnttab_grant_copy_segment {
    union xengnttab_copy_ptr {
        void *virt;
        struct {
            uint32_t ref;
            uint16_t offset;
            uint16_t domid;
        } foreign;
    } source, dest;
    uint16_t len;
    uint16_t flags;
    int16_t status;
};

typedef struct xengnttab_grant_copy_segment xengnttab_grant_copy_segment_t;

static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count,
                                       xengnttab_grant_copy_segment_t *segs)
{
    return -ENOSYS;
}
#endif

#endif /* QEMU_HW_XEN_COMMON_H */