#ifndef QEMU_HW_XEN_NATIVE_H
#define QEMU_HW_XEN_NATIVE_H

#ifdef __XEN_INTERFACE_VERSION__
#error In Xen native files, include xen_native.h before other Xen headers
#endif

/*
 * If we have new enough libxenctrl then we do not want/need these compat
 * interfaces, despite what the user supplied cflags might say. They
 * must be undefined before including xenctrl.h
 */
#undef XC_WANT_COMPAT_EVTCHN_API
#undef XC_WANT_COMPAT_GNTTAB_API
#undef XC_WANT_COMPAT_MAP_FOREIGN_API

#include <xenctrl.h>
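/*
 * Illustrative note (not part of the upstream header): with the compat
 * shims disabled above, callers are expected to use the stable split
 * libraries directly rather than the legacy xc_* wrappers, e.g.:
 *
 *     xenevtchn_handle *xce = xenevtchn_open(NULL, 0);
 *     xengnttab_handle *xgt = xengnttab_open(NULL, 0);
 *     xenforeignmemory_handle *xfm = xenforeignmemory_open(NULL, 0);
 */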
#include "hw/xen/xen.h"
#include "hw/pci/pci_device.h"
#include "hw/xen/trace.h"

extern xc_interface *xen_xc;
/*
 * We don't support Xen prior to 4.7.1.
 */

#include <xenforeignmemory.h>

extern xenforeignmemory_handle *xen_fmem;
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

typedef xc_interface xendevicemodel_handle;

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40900 */

#undef XC_WANT_COMPAT_DEVICEMODEL_API
#include <xendevicemodel.h>

#endif
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100

static inline int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    uint32_t i;
    int rc;

    for (i = 0; i < size; i++) {
        unsigned long idx = src_gfn + i;
        xen_pfn_t gpfn = dst_gfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, domid, XENMAPSPACE_gmfn, idx,
                                      gpfn);
        if (rc) {
            return rc;
        }
    }

    return 0;
}

static inline int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    return xc_domain_pin_memory_cacheattr(xen_xc, domid, start, end, type);
}
typedef void xenforeignmemory_resource_handle;

#define XENMEM_resource_ioreq_server 0

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

static inline xenforeignmemory_resource_handle *xenforeignmemory_map_resource(
    xenforeignmemory_handle *fmem, domid_t domid, unsigned int type,
    unsigned int id, unsigned long frame, unsigned long nr_frames,
    void **paddr, int prot, int flags)
{
    errno = EOPNOTSUPP;
    return NULL;
}

static inline int xenforeignmemory_unmap_resource(
    xenforeignmemory_handle *fmem, xenforeignmemory_resource_handle *fres)
{
    return 0;
}

#endif /* CONFIG_XEN_CTRL_INTERFACE_VERSION < 41100 */
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41000

#define XEN_COMPAT_PHYSMAP
static inline void *xenforeignmemory_map2(xenforeignmemory_handle *h,
                                          uint32_t dom, void *addr,
                                          int prot, int flags, size_t pages,
                                          const xen_pfn_t arr[/*pages*/],
                                          int err[/*pages*/])
{
    assert(addr == NULL && flags == 0);
    return xenforeignmemory_map(h, dom, prot, pages, arr, err);
}

static inline int xentoolcore_restrict_all(domid_t domid)
{
    errno = ENOTTY;
    return -1;
}

static inline int xendevicemodel_shutdown(xendevicemodel_handle *dmod,
                                          domid_t domid, unsigned int reason)
{
    errno = ENOTTY;
    return -1;
}

#else /* CONFIG_XEN_CTRL_INTERFACE_VERSION >= 41000 */

#include <xentoolcore.h>

#endif
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 40900

static inline xendevicemodel_handle *xendevicemodel_open(
    struct xentoollog_logger *logger, unsigned int open_flags)
{
    return xen_xc;
}

static inline int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    return xc_hvm_create_ioreq_server(dmod, domid, handle_bufioreq,
                                      id);
}

static inline int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn,
    evtchn_port_t *bufioreq_port)
{
    return xc_hvm_get_ioreq_server_info(dmod, domid, id, ioreq_pfn,
                                        bufioreq_pfn, bufioreq_port);
}

static inline int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_map_io_range_to_ioreq_server(dmod, domid, id, is_mmio,
                                               start, end);
}

static inline int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    return xc_hvm_unmap_io_range_from_ioreq_server(dmod, domid, id, is_mmio,
                                                   start, end);
}

static inline int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_map_pcidev_to_ioreq_server(dmod, domid, id, segment,
                                             bus, device, function);
}

static inline int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    return xc_hvm_unmap_pcidev_from_ioreq_server(dmod, domid, id, segment,
                                                 bus, device, function);
}
static inline int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    return xc_hvm_destroy_ioreq_server(dmod, domid, id);
}

static inline int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    return xc_hvm_set_ioreq_server_state(dmod, domid, id, enabled);
}

static inline int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    return xc_hvm_set_pci_intx_level(dmod, domid, segment, bus, device,
                                     intx, level);
}

static inline int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    return xc_hvm_set_isa_irq_level(dmod, domid, irq, level);
}

static inline int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    return xc_hvm_set_pci_link_route(dmod, domid, link, irq);
}

static inline int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    return xc_hvm_inject_msi(dmod, domid, msi_addr, msi_data);
}

static inline int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    return xc_hvm_track_dirty_vram(dmod, domid, first_pfn, nr,
                                   dirty_bitmap);
}

static inline int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    return xc_hvm_modified_memory(dmod, domid, first_pfn, nr);
}

static inline int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    return xc_hvm_set_mem_type(dmod, domid, mem_type, first_pfn, nr);
}

#endif
extern xendevicemodel_handle *xen_dmod;

static inline int xen_set_mem_type(domid_t domid, hvmmem_type_t type,
                                   uint64_t first_pfn, uint32_t nr)
{
    return xendevicemodel_set_mem_type(xen_dmod, domid, type, first_pfn,
                                       nr);
}

static inline int xen_set_pci_intx_level(domid_t domid, uint16_t segment,
                                         uint8_t bus, uint8_t device,
                                         uint8_t intx, unsigned int level)
{
    return xendevicemodel_set_pci_intx_level(xen_dmod, domid, segment, bus,
                                             device, intx, level);
}

static inline int xen_inject_msi(domid_t domid, uint64_t msi_addr,
                                 uint32_t msi_data)
{
    return xendevicemodel_inject_msi(xen_dmod, domid, msi_addr, msi_data);
}

static inline int xen_set_isa_irq_level(domid_t domid, uint8_t irq,
                                        unsigned int level)
{
    return xendevicemodel_set_isa_irq_level(xen_dmod, domid, irq, level);
}

static inline int xen_track_dirty_vram(domid_t domid, uint64_t first_pfn,
                                       uint32_t nr, unsigned long *bitmap)
{
    return xendevicemodel_track_dirty_vram(xen_dmod, domid, first_pfn, nr,
                                           bitmap);
}

static inline int xen_modified_memory(domid_t domid, uint64_t first_pfn,
                                      uint32_t nr)
{
    return xendevicemodel_modified_memory(xen_dmod, domid, first_pfn, nr);
}

static inline int xen_restrict(domid_t domid)
{
    int rc;
    rc = xentoolcore_restrict_all(domid);
    trace_xen_domid_restrict(rc ? errno : 0);
    return rc;
}
void destroy_hvm_domain(bool reboot);

/* shutdown/destroy current domain because of an error */
void xen_shutdown_fatal_error(const char *fmt, ...) G_GNUC_PRINTF(1, 2);
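/*
 * Illustrative usage sketch (not part of the upstream header):
 * xen_shutdown_fatal_error() takes printf-style arguments, e.g.
 *
 *     xen_shutdown_fatal_error("failed to map ioreq pages: %s\n",
 *                              strerror(errno));
 */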
#ifdef HVM_PARAM_VMPORT_REGS_PFN
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    int rc;
    uint64_t value;
    rc = xc_hvm_param_get(xc, dom, HVM_PARAM_VMPORT_REGS_PFN, &value);
    if (rc >= 0) {
        *vmport_regs_pfn = (xen_pfn_t) value;
    }
    return rc;
}
#else
static inline int xen_get_vmport_regs_pfn(xc_interface *xc, domid_t dom,
                                          xen_pfn_t *vmport_regs_pfn)
{
    return -ENOSYS;
}
#endif
static inline int xen_get_default_ioreq_server_info(domid_t dom,
                                                     xen_pfn_t *ioreq_pfn,
                                                     xen_pfn_t *bufioreq_pfn,
                                                     evtchn_port_t
                                                         *bufioreq_evtchn)
{
    unsigned long param;
    int rc;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_IOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_IOREQ_PFN\n");
        return -1;
    }

    *ioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_PFN, &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_PFN\n");
        return -1;
    }

    *bufioreq_pfn = param;

    rc = xc_get_hvm_param(xen_xc, dom, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &param);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }

    *bufioreq_evtchn = param;

    return 0;
}
static bool use_default_ioreq_server;
static inline void xen_map_memory_section(domid_t dom,
                                          ioservid_t ioservid,
                                          MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 1,
                                                start_addr, end_addr);
}
static inline void xen_unmap_memory_section(domid_t dom,
                                            ioservid_t ioservid,
                                            MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_mmio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    1, start_addr, end_addr);
}
static inline void xen_map_io_section(domid_t dom,
                                      ioservid_t ioservid,
                                      MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_map_io_range_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                start_addr, end_addr);
}
static inline void xen_unmap_io_section(domid_t dom,
                                        ioservid_t ioservid,
                                        MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    hwaddr end_addr = start_addr + size - 1;

    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_portio_range(ioservid, start_addr, end_addr);
    xendevicemodel_unmap_io_range_from_ioreq_server(xen_dmod, dom, ioservid,
                                                    0, start_addr, end_addr);
}
static inline void xen_map_pcidev(domid_t dom,
                                  ioservid_t ioservid,
                                  PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_map_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                         PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_map_pcidev_to_ioreq_server(xen_dmod, dom, ioservid, 0,
                                              pci_dev_bus_num(pci_dev),
                                              PCI_SLOT(pci_dev->devfn),
                                              PCI_FUNC(pci_dev->devfn));
}
static inline void xen_unmap_pcidev(domid_t dom,
                                    ioservid_t ioservid,
                                    PCIDevice *pci_dev)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_unmap_pcidev(ioservid, pci_dev_bus_num(pci_dev),
                           PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
    xendevicemodel_unmap_pcidev_from_ioreq_server(xen_dmod, dom, ioservid, 0,
                                                  pci_dev_bus_num(pci_dev),
                                                  PCI_SLOT(pci_dev->devfn),
                                                  PCI_FUNC(pci_dev->devfn));
}
static inline int xen_create_ioreq_server(domid_t dom,
                                          int handle_bufioreq,
                                          ioservid_t *ioservid)
{
    int rc = xendevicemodel_create_ioreq_server(xen_dmod, dom,
                                                handle_bufioreq,
                                                ioservid);

    if (rc == 0) {
        trace_xen_ioreq_server_create(*ioservid);
        return rc;
    }

    *ioservid = 0;
    use_default_ioreq_server = true;
    trace_xen_default_ioreq_server();

    return rc;
}
static inline void xen_destroy_ioreq_server(domid_t dom,
                                            ioservid_t ioservid)
{
    if (use_default_ioreq_server) {
        return;
    }

    trace_xen_ioreq_server_destroy(ioservid);
    xendevicemodel_destroy_ioreq_server(xen_dmod, dom, ioservid);
}
static inline int xen_get_ioreq_server_info(domid_t dom,
                                            ioservid_t ioservid,
                                            xen_pfn_t *ioreq_pfn,
                                            xen_pfn_t *bufioreq_pfn,
                                            evtchn_port_t *bufioreq_evtchn)
{
    if (use_default_ioreq_server) {
        return xen_get_default_ioreq_server_info(dom, ioreq_pfn,
                                                 bufioreq_pfn,
                                                 bufioreq_evtchn);
    }

    return xendevicemodel_get_ioreq_server_info(xen_dmod, dom, ioservid,
                                                ioreq_pfn, bufioreq_pfn,
                                                bufioreq_evtchn);
}
static inline int xen_set_ioreq_server_state(domid_t dom,
                                             ioservid_t ioservid,
                                             bool enable)
{
    if (use_default_ioreq_server) {
        return 0;
    }

    trace_xen_ioreq_server_state(ioservid, enable);
    return xendevicemodel_set_ioreq_server_state(xen_dmod, dom, ioservid,
                                                 enable);
}
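/*
 * Illustrative usage sketch (not part of the upstream header): a device
 * model typically creates an ioreq server, queries its pages and event
 * channel, and only then enables it.  Passing HVM_IOREQSRV_BUFIOREQ_ATOMIC
 * as handle_bufioreq is an assumption made for this sketch.
 *
 *     ioservid_t ioservid;
 *     xen_pfn_t ioreq_pfn, bufioreq_pfn;
 *     evtchn_port_t bufioreq_evtchn;
 *
 *     if (xen_create_ioreq_server(dom, HVM_IOREQSRV_BUFIOREQ_ATOMIC,
 *                                 &ioservid) < 0 ||
 *         xen_get_ioreq_server_info(dom, ioservid, &ioreq_pfn,
 *                                   &bufioreq_pfn, &bufioreq_evtchn) < 0) {
 *         ... report the error ...
 *     }
 *     xen_set_ioreq_server_state(dom, ioservid, true);
 */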
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41500
static inline int xendevicemodel_set_irq_level(xendevicemodel_handle *dmod,
                                               domid_t domid, uint32_t irq,
                                               unsigned int level)
{
    return 0;
}
#endif
#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 41700
#define GUEST_VIRTIO_MMIO_BASE xen_mk_ullong(0x02000000)
#define GUEST_VIRTIO_MMIO_SIZE xen_mk_ullong(0x00100000)
#define GUEST_VIRTIO_MMIO_SPI_FIRST 33
#define GUEST_VIRTIO_MMIO_SPI_LAST 43
#endif
#if defined(__i386__) || defined(__x86_64__)
#define GUEST_RAM_BANKS 2
#define GUEST_RAM0_BASE 0x40000000ULL /* 3GB of low RAM @ 1GB */
#define GUEST_RAM0_SIZE 0xc0000000ULL
#define GUEST_RAM1_BASE 0x0200000000ULL /* 1016GB of RAM @ 8GB */
#define GUEST_RAM1_SIZE 0xfe00000000ULL
#endif
#endif /* QEMU_HW_XEN_NATIVE_H */