/*--------------------------------------------------------------------*/
/*--- Xen Hypercalls                                 syswrap-xen.c ---*/
/*--------------------------------------------------------------------*/

   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2012 Citrix Systems
      ian.campbell@citrix.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
#include "pub_core_basics.h"
#include "pub_core_vki.h"

#if defined(ENABLE_XEN)

#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-xen.h"

#define PRE(name)       static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name)      static DEFN_POST_TEMPLATE(xen, name)
static void bad_intf_version ( ThreadId tid,
                               SyscallArgLayout* layout,
                               /*MOD*/SyscallArgs* args,
                               /*OUT*/SyscallStatus* status,
                               /*OUT*/UWord* flags,
                               const HChar* hypercall,
                               UWord version)
{
   VG_(dmsg)("WARNING: %s version %#lx not supported\n",
             hypercall, version);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
static void bad_subop ( ThreadId tid,
                        SyscallArgLayout* layout,
                        /*MOD*/SyscallArgs* args,
                        /*OUT*/SyscallStatus* status,
                        /*OUT*/UWord* flags,
                        const HChar* hypercall,
                        UWord subop)
{
   VG_(dmsg)("WARNING: unhandled %s subop: %lu\n",
             hypercall, subop);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
   PRINT("__HYPERVISOR_memory_op ( %lu, %#lx )", ARG1, ARG2);

   case VKI_XENMEM_maximum_ram_page:

   case VKI_XENMEM_maximum_gpfn:
      PRE_MEM_READ("XENMEM_maximum_gpfn domid",
                   (Addr)ARG2, sizeof(vki_xen_domid_t));

   case VKI_XENMEM_machphys_mfn_list:
   case VKI_XENMEM_machphys_compat_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
                   (Addr)&arg->max_extents, sizeof(arg->max_extents));
      PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
                   (Addr)&arg->extent_start, sizeof(arg->extent_start));

   case VKI_XENMEM_set_memory_map: {
      struct vki_xen_foreign_memory_map *arg =
         (struct vki_xen_foreign_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_set_memory_map domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_set_memory_map map",
                   (Addr)&arg->map, sizeof(arg->map));

   case VKI_XENMEM_memory_map:
   case VKI_XENMEM_machine_memory_map: {
      struct vki_xen_memory_map *arg =
         (struct vki_xen_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_memory_map nr_entries",
                   (Addr)&arg->nr_entries, sizeof(arg->nr_entries));

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_populate_physmap:
   case VKI_XENMEM_claim_pages: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;
      case VKI_XENMEM_increase_reservation:
         which = "XENMEM_increase_reservation";

      case VKI_XENMEM_decrease_reservation:
         which = "XENMEM_decrease_reservation";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);

      case VKI_XENMEM_populate_physmap:
         which = "XENMEM_populate_physmap";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);

      case VKI_XENMEM_claim_pages:
         which = "XENMEM_claim_pages";

         which = "XENMEM_unknown";

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_start,
                   sizeof(memory_reservation->extent_start));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->nr_extents,
                   sizeof(memory_reservation->nr_extents));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_order,
                   sizeof(memory_reservation->extent_order));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->mem_flags,
                   sizeof(memory_reservation->mem_flags));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->domid,
                   sizeof(memory_reservation->domid));
   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_add_to_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_add_to_physmap size",
                   (Addr)&arg->size, sizeof(arg->size));
      PRE_MEM_READ("XENMEM_add_to_physmap space",
                   (Addr)&arg->space, sizeof(arg->space));
      PRE_MEM_READ("XENMEM_add_to_physmap idx",
                   (Addr)&arg->idx, sizeof(arg->idx));
      PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));

   case VKI_XENMEM_remove_from_physmap: {
      struct vki_xen_remove_from_physmap *arg =
         (struct vki_xen_remove_from_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_remove_from_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:

   case VKI_XENMEM_access_op: {
      struct vki_xen_mem_event_op *arg =
         (struct vki_xen_mem_event_op *)ARG2;
      PRE_MEM_READ("XENMEM_access_op domid",
                   (Addr)&arg->domain, sizeof(arg->domain));
      PRE_MEM_READ("XENMEM_access_op op",
                   (Addr)&arg->op, sizeof(arg->op));
      PRE_MEM_READ("XENMEM_access_op gfn",
                   (Addr)&arg->gfn, sizeof(arg->gfn));

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_memory_op", ARG1);
   PRINT("__HYPERVISOR_mmuext_op ( %#lx, %ld, %#lx, %lu )",
         ARG1, SARG2, ARG3, ARG4);

   struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
   unsigned int i, nr = ARG2;

   for (i=0; i<nr; i++) {
      struct vki_xen_mmuext_op *op = ops + i;
      PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
                   (Addr)&op->cmd, sizeof(op->cmd));

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_COPY_PAGE:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.mfn,
                      sizeof(op->arg1.mfn));

      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.linear_addr",
                      (Addr)&op->arg1.linear_addr,
                      sizeof(op->arg1.linear_addr));

      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:

      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
                      (Addr)&op->arg2.nr_ents,
                      sizeof(op->arg2.nr_ents));

      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
                      (Addr)&op->arg2.vcpumask,
                      sizeof(op->arg2.vcpumask));

      case VKI_XEN_MMUEXT_COPY_PAGE:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
                      (Addr)&op->arg2.src_mfn,
                      sizeof(op->arg2.src_mfn));

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
   /* XXX assuming flask, only actual XSM right now */
   struct vki_xen_flask_op *op = (struct vki_xen_flask_op *)ARG1;

   PRINT("__HYPERVISOR_xsm_op ( %u )", op->cmd);

   /*
    * Common part of xen_flask_op:
    *    vki_uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_xsm_op", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   switch (op->interface_version) {

      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_xsm_op", op->interface_version);

#define PRE_XEN_XSM_OP_READ(_xsm_op, _union, _field)            \
   PRE_MEM_READ("FLASK_" #_xsm_op " u." #_union "." #_field,    \
                (Addr)&op->u._union._field,                     \
                sizeof(op->u._union._field))
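/* Illustrative note (not part of the upstream source): given the macro
   above, a call such as
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, sid);
   expands to
      PRE_MEM_READ("FLASK_SID_TO_CONTEXT u.sid_context.sid",
                   (Addr)&op->u.sid_context.sid,
                   sizeof(op->u.sid_context.sid));
   i.e. it tells the tool that the named sub-field of the flask_op union
   must be initialised by the client before the hypercall runs. */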

   case VKI_FLASK_SID_TO_CONTEXT:
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, sid);
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, size);
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, context.p);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xsm_op", op->cmd);

#undef __PRE_XEN_XSM_OP_READ
#undef PRE_XEN_XSM_OP_READ
   PRINT("__HYPERVISOR_sched_op ( %ld, %#lx )", SARG1, ARG2);
   void *arg = (void *)(unsigned long)ARG2;

#define __PRE_XEN_SCHEDOP_READ(_schedop, _type, _field) \
   PRE_MEM_READ("XEN_SCHEDOP_" # _schedop " " #_field,  \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_SCHEDOP_READ(_schedop, _field)          \
   __PRE_XEN_SCHEDOP_READ(_schedop, vki_xen_ ## _schedop ## _t, _field)
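/* Illustrative note (not part of the upstream source): the two-level macro
   pastes the subop name into a vki type name, so
      PRE_XEN_SCHEDOP_READ(remote_shutdown, reason);
   becomes
      PRE_MEM_READ("XEN_SCHEDOP_remote_shutdown reason",
                   (Addr)&((vki_xen_remote_shutdown_t*)arg)->reason,
                   sizeof(((vki_xen_remote_shutdown_t*)arg)->reason));
   so only the fields Xen actually consumes for the subop are checked. */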

   case VKI_XEN_SCHEDOP_remote_shutdown:
      PRE_XEN_SCHEDOP_READ(remote_shutdown, domain_id);
      PRE_XEN_SCHEDOP_READ(remote_shutdown, reason);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sched_op", ARG1);

#undef __PRE_XEN_SCHEDOP_READ
#undef PRE_XEN_SCHEDOP_READ
static void pre_evtchn_op(ThreadId tid,
                          SyscallArgLayout* layout,
                          /*MOD*/SyscallArgs* arrghs,
                          /*OUT*/SyscallStatus* status,
                          /*OUT*/UWord* flags,
                          __vki_u32 cmd, void *arg, int compat)
{
   PRINT("__HYPERVISOR_event_channel_op%s ( %u, %p )",
         compat ? "_compat" : "", cmd, arg);

   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
                   (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
      PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
                   (Addr)&alloc_unbound->remote_dom,
                   sizeof(alloc_unbound->remote_dom));

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op_compat", cmd);

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op", cmd);

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 ARG1, (void *)ARG2, 0);

PRE(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
                ARG1, sizeof(*evtchn));

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 evtchn->cmd, &evtchn->u, 1);
   PRINT("__HYPERVISOR_physdev_op ( %ld, %#lx )", SARG1, ARG2);

#define PRE_XEN_PHYSDEVOP_READ(_op, _field)             \
   PRE_MEM_READ("XEN_PHYSDEVOP_" #_op " ." #_field,     \
                (Addr)&arg->_field,                     \
                sizeof(arg->_field))
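/* Illustrative note (not part of the upstream source): here the macro is
   invoked with a string literal, e.g.
      PRE_XEN_PHYSDEVOP_READ("map_pirq", domid);
   which expands (after string concatenation) to a PRE_MEM_READ of
   &arg->domid labelled "XEN_PHYSDEVOP_\"map_pirq\" .domid", relying on the
   local 'arg' pointer declared in each case block below. */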

   case VKI_XEN_PHYSDEVOP_map_pirq: {
      struct vki_xen_physdev_map_pirq *arg =
         (struct vki_xen_physdev_map_pirq *)ARG2;

      PRE_XEN_PHYSDEVOP_READ("map_pirq", domid);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", type);

      PRE_XEN_PHYSDEVOP_READ("map_pirq", bus);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", devfn);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", entry_nr);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", table_base);

      case VKI_XEN_MAP_PIRQ_TYPE_MSI:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);

      case VKI_XEN_MAP_PIRQ_TYPE_GSI:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);
         PRE_XEN_PHYSDEVOP_READ("map_pirq", pirq);

      case VKI_XEN_MAP_PIRQ_TYPE_MSI_SEG:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);

      case VKI_XEN_MAP_PIRQ_TYPE_MULTI_MSI:

   case VKI_XEN_PHYSDEVOP_unmap_pirq: {
      struct vki_xen_physdev_unmap_pirq *arg =
         (struct vki_xen_physdev_unmap_pirq *)ARG2;
      PRE_XEN_PHYSDEVOP_READ("unmap_pirq", domid);
      PRE_XEN_PHYSDEVOP_READ("unmap_pirq", pirq);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_physdev_op", cmd);

#undef PRE_XEN_PHYSDEVOP_READ
   PRINT("__HYPERVISOR_xen_version ( %ld, %#lx )", SARG1, ARG2);

   case VKI_XENVER_version:
   case VKI_XENVER_extraversion:
   case VKI_XENVER_compile_info:
   case VKI_XENVER_capabilities:
   case VKI_XENVER_changeset:
   case VKI_XENVER_platform_parameters:
   case VKI_XENVER_get_features:
   case VKI_XENVER_pagesize:
   case VKI_XENVER_guest_handle:
   case VKI_XENVER_commandline:

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xen_version", ARG1);
   PRINT("__HYPERVISOR_grant_table_op ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3);

   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table *)ARG2;
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
                   (Addr)&gst->dom, sizeof(gst->dom));
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
                   (Addr)&gst->nr_frames, sizeof(gst->nr_frames));

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_grant_table_op", ARG1);
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   PRINT("__HYPERVISOR_sysctl ( %u )", sysctl->cmd);

   /*
    * Common part of xen_sysctl:
    *    uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   switch (sysctl->interface_version)

      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_sysctl", sysctl->interface_version);

#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field)                  \
   PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field,       \
                (Addr)&sysctl->u._union._field,                         \
                sizeof(sysctl->u._union._field))
#define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
   __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
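/* Illustrative note (not part of the upstream source): the one-argument form
   reuses the subcommand name for the union member, so
      PRE_XEN_SYSCTL_READ(readconsole, clear);
   expands to
      PRE_MEM_READ("XEN_SYSCTL_readconsole u.readconsole.clear",
                   (Addr)&sysctl->u.readconsole.clear,
                   sizeof(sysctl->u.readconsole.clear));
   while the explicit three-argument __PRE_XEN_SYSCTL_READ form allows the
   diagnostic label and the union member name to differ. */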

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      /* These are all unconditionally read */
      PRE_XEN_SYSCTL_READ(readconsole, clear);
      PRE_XEN_SYSCTL_READ(readconsole, incremental);
      PRE_XEN_SYSCTL_READ(readconsole, buffer);
      PRE_XEN_SYSCTL_READ(readconsole, count);

      /* 'index' only read if 'incremental' is nonzero */
      if (sysctl->u.readconsole.incremental)
         PRE_XEN_SYSCTL_READ(readconsole, index);

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)

         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);

         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);

         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);

         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000010, buffer);

         VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
                   "%"PRIx32" not implemented yet\n",
                   sysctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);

   case VKI_XEN_SYSCTL_debug_keys:
      PRE_XEN_SYSCTL_READ(debug_keys, keys);
      PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
      PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
                   (Addr)sysctl->u.debug_keys.keys.p,
                   sysctl->u.debug_keys.nr_keys * sizeof(char));

   case VKI_XEN_SYSCTL_sched_id:

   case VKI_XEN_SYSCTL_cpupool_op:
      PRE_XEN_SYSCTL_READ(cpupool_op, op);

      switch(sysctl->u.cpupool_op.op) {
      case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
         PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
         PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
         PRE_XEN_SYSCTL_READ(cpupool_op, domid);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
         PRE_XEN_SYSCTL_READ(cpupool_op, cpu);

   case VKI_XEN_SYSCTL_physinfo:
      /* No input params */

   case VKI_XEN_SYSCTL_topologyinfo:
      PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);

   case VKI_XEN_SYSCTL_numainfo:
      PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sysctl", sysctl->cmd);

#undef PRE_XEN_SYSCTL_READ
#undef __PRE_XEN_SYSCTL_READ
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   PRINT("__HYPERVISOR_domctl ( %u ) on dom%d", domctl->cmd, domctl->domain);

   /*
    * Common part of xen_domctl:
    *    vki_uint32_t interface_version;
    *    vki_xen_domid_t domain;
    */
   PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
                + sizeof(vki_xen_domid_t));

   switch (domctl->interface_version)

      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_domctl", domctl->interface_version);

#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field)                  \
   PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field,       \
                (Addr)&domctl->u._union._field,                         \
                sizeof(domctl->u._union._field))
#define PRE_XEN_DOMCTL_READ(_domctl, _field) \
   __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
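/* Illustrative note (not part of the upstream source): the three-argument
   form lets the diagnostic label and the union member differ, which is how
   versioned payloads are handled below, e.g.
      __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
   expands to
      PRE_MEM_READ("XEN_DOMCTL_gethvmcontext u.hvmcontext.size",
                   (Addr)&domctl->u.hvmcontext.size,
                   sizeof(domctl->u.hvmcontext.size));
   whereas PRE_XEN_DOMCTL_READ(max_mem, max_memkb) simply reuses the
   subcommand name for the union member. */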

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_vcpus:
   case VKI_XEN_DOMCTL_get_address_size:
   case VKI_XEN_DOMCTL_gettscinfo:
   case VKI_XEN_DOMCTL_getdomaininfo:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_resumedomain:
      /* No input fields. */

   case VKI_XEN_DOMCTL_createdomain:
      PRE_XEN_DOMCTL_READ(createdomain, ssidref);
      PRE_XEN_DOMCTL_READ(createdomain, handle);
      PRE_XEN_DOMCTL_READ(createdomain, flags);

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally reads the 'buffer' pointer */
      __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
      /* Xen only consumes 'size' if 'buffer' is non NULL.  A NULL
       * buffer is a request for the required size. */
      if ( domctl->u.hvmcontext.buffer.p )
         __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);

   case VKI_XEN_DOMCTL_sethvmcontext:
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
      PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
                   (Addr)domctl->u.hvmcontext.buffer.p,
                   domctl->u.hvmcontext.size);

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, type);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, instance);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial_00000007, buffer);

      switch (domctl->u.hvmcontext_partial_00000007.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
            PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                          (Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
                          VKI_HVM_SAVE_LENGTH(CPU));

      case VKI_HVM_SAVE_CODE(MTRR):
         if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
            PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                          (Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
                          VKI_HVM_SAVE_LENGTH(MTRR));

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl_gethvmcontext_partial type",
                   domctl->u.hvmcontext_partial_00000007.type);

   case VKI_XEN_DOMCTL_max_mem:
      PRE_XEN_DOMCTL_READ(max_mem, max_memkb);

   case VKI_XEN_DOMCTL_set_address_size:
      __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);

   case VKI_XEN_DOMCTL_test_assign_device:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
         __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_00000007, machine_sbdf);

         __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, dev);
         __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, flag);
         switch (domctl->u.assign_device_0000000b.dev) {
         case VKI_XEN_DOMCTL_DEV_PCI:
            __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, u.pci);

         case VKI_XEN_DOMCTL_DEV_DT:
            __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, u.dt);
            PRE_MEM_READ("XEN_DOMCTL_test_assign_device.dt",
                         (Addr)domctl->u.assign_device_0000000b.u.dt.path.p,
                         domctl->u.assign_device_0000000b.u.dt.size);

            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl_test_assign_device dev",
                      domctl->u.assign_device_0000000b.dev);

   case VKI_XEN_DOMCTL_assign_device:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
         __PRE_XEN_DOMCTL_READ(assign_device, assign_device_00000007, machine_sbdf);

         __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, dev);
         __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, flag);
         switch (domctl->u.assign_device_0000000b.dev) {
         case VKI_XEN_DOMCTL_DEV_PCI:
            __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, u.pci);

         case VKI_XEN_DOMCTL_DEV_DT:
            __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, u.dt);
            PRE_MEM_READ("XEN_DOMCTL_assign_device.dt",
                         (Addr)domctl->u.assign_device_0000000b.u.dt.path.p,
                         domctl->u.assign_device_0000000b.u.dt.size);

            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl_assign_device dev",
                      domctl->u.assign_device_0000000b.dev);

   case VKI_XEN_DOMCTL_deassign_device:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
         __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_00000007, machine_sbdf);

         __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, dev);
         __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, flag);
         switch (domctl->u.assign_device_0000000b.dev) {
         case VKI_XEN_DOMCTL_DEV_PCI:
            __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, u.pci);

         case VKI_XEN_DOMCTL_DEV_DT:
            __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, u.dt);
            PRE_MEM_READ("XEN_DOMCTL_deassign_device.dt",
                         (Addr)domctl->u.assign_device_0000000b.u.dt.path.p,
                         domctl->u.assign_device_0000000b.u.dt.size);

            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl_deassign_device dev",
                      domctl->u.assign_device_0000000b.dev);

   case VKI_XEN_DOMCTL_settscinfo:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.tsc_mode);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.gtsc_khz);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.incarnation);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.elapsed_nsec);

         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, tsc_mode);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, gtsc_khz);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, incarnation);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, elapsed_nsec);

   case VKI_XEN_DOMCTL_irq_permission:
      PRE_XEN_DOMCTL_READ(irq_permission, pirq);
      PRE_XEN_DOMCTL_READ(irq_permission, allow_access);

   case VKI_XEN_DOMCTL_iomem_permission:
      PRE_XEN_DOMCTL_READ(iomem_permission, first_mfn);
      PRE_XEN_DOMCTL_READ(iomem_permission, nr_mfns);
      PRE_XEN_DOMCTL_READ(iomem_permission, allow_access);

   case VKI_XEN_DOMCTL_ioport_permission:
      PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
      PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
      PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);

   case VKI_XEN_DOMCTL_hypercall_init:
      PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);

   case VKI_XEN_DOMCTL_settimeoffset:
      PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);

   case VKI_XEN_DOMCTL_getvcpuinfo:
      PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);

   case VKI_XEN_DOMCTL_scheduler_op:
      PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
      PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);

         case VKI_XEN_SCHEDULER_CREDIT:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);

         case VKI_XEN_SCHEDULER_CREDIT2:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);

         case VKI_XEN_SCHEDULER_RTDS:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.budget);

         case VKI_XEN_SCHEDULER_ARINC653:

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      switch (domctl->interface_version) {

         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);

         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);

   case VKI_XEN_DOMCTL_setvcpuaffinity:
      switch (domctl->interface_version) {

         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
         PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
                      (Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                      domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);

         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_hard.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);

         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_soft.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);

   case VKI_XEN_DOMCTL_getnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);

   case VKI_XEN_DOMCTL_setnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);

   case VKI_XEN_DOMCTL_getvcpucontext:
      __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);

   case VKI_XEN_DOMCTL_setvcpucontext:
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);

   case VKI_XEN_DOMCTL_pin_mem_cacheattr:
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, start);
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, end);
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, type);

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)

         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);

         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);

         VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_get_ext_vcpucontext domctl version %#"
                   PRIx32 " not implemented\n", domctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);

   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
      switch (domctl->interface_version)

         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, size);
#if defined(__i386__) || defined(__x86_64__)
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_disables_events);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_disables_events);

         if ( domctl->u.ext_vcpucontext_00000008.size >=
              offsetof(struct vki_xen_domctl_ext_vcpucontext_00000008, mcg_cap) )
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                  mcg_cap);

         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_disables_events);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_disables_events);

         if ( domctl->u.ext_vcpucontext_00000009.size >=
              offsetof(struct vki_xen_domctl_ext_vcpucontext_00000009, caps) )
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                  caps);
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,

         VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_set_ext_vcpucontext domctl version %#"
                   PRIx32 " not implemented\n", domctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);

   case VKI_XEN_DOMCTL_set_cpuid:
      PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
                   (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
      PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
                   (Addr)domctl->u.getpageframeinfo3.array.p,
                   domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));

   case VKI_XEN_DOMCTL_setvcpuextstate:
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, buffer);
      PRE_MEM_READ("XEN_DOMCTL_setvcpuextstate *u.vcpuextstate.buffer.p",
                   (Addr)domctl->u.vcpuextstate.buffer.p,
                   domctl->u.vcpuextstate.size);

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);

   case VKI_XEN_DOMCTL_shadow_op:
      PRE_XEN_DOMCTL_READ(shadow_op, op);

      switch(domctl->u.shadow_op.op)

      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
      case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
         /* No further inputs */

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
         PRE_XEN_DOMCTL_READ(shadow_op, mode);
         switch(domctl->u.shadow_op.mode)

         case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
            goto domctl_shadow_op_enable_logdirty;

            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl shadowop mode",
                      domctl->u.shadow_op.mode);

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
      domctl_shadow_op_enable_logdirty:
         /* No further inputs */

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
         PRE_XEN_DOMCTL_READ(shadow_op, pages);

      case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
         PRE_XEN_DOMCTL_READ(shadow_op, mb);

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl shadow(10)",
                   domctl->u.shadow_op.op);

   case VKI_XEN_DOMCTL_set_max_evtchn:
      PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);

   case VKI_XEN_DOMCTL_cacheflush:
      PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
      PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);

   case VKI_XEN_DOMCTL_set_access_required:
      PRE_XEN_DOMCTL_READ(access_required, access_required);

   case VKI_XEN_DOMCTL_mem_event_op:
   //case VKI_XEN_DOMCTL_vm_event_op: /* name change in 4.6 */
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
         __PRE_XEN_DOMCTL_READ(mem_event_op, mem_event_op_00000007, op);
         __PRE_XEN_DOMCTL_READ(mem_event_op, mem_event_op_00000007, mode);

         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, op);
         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, mode);

         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, op);
         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, mode);
         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_00000012, u.enable);

   case VKI_XEN_DOMCTL_debug_op:
      PRE_XEN_DOMCTL_READ(debug_op, op);
      PRE_XEN_DOMCTL_READ(debug_op, vcpu);

   case VKI_XEN_DOMCTL_get_vcpu_msrs:
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, vcpu);
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msr_count);
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msrs);

   case VKI_XEN_DOMCTL_set_vcpu_msrs:
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, vcpu);
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msr_count);
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msrs);
      PRE_MEM_READ("XEN_DOMCTL_set_vcpu_msrs *u.vcpu_msrs.msrs.p",
                   (Addr)domctl->u.vcpu_msrs.msrs.p,
                   sizeof(vki_xen_domctl_vcpu_msr_t) *
                   domctl->u.vcpu_msrs.msr_count);

   case VKI_XEN_DOMCTL_monitor_op:
      switch (domctl->interface_version) {

         if (domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
             domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
            switch (domctl->u.monitor_op_0000000b.event) {
            case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_0000000b, u.mov_to_cr);

            case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_0000000b, u.mov_to_msr);

            case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_0000000b, u.guest_request);

            case VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:

         if (domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
             domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
            switch (domctl->u.monitor_op_00000011.event) {
            case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.mov_to_cr);

            case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.mov_to_msr);

            case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_00000011, u.guest_request);

            case VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_domctl", domctl->cmd);

#undef PRE_XEN_DOMCTL_READ
#undef __PRE_XEN_DOMCTL_READ
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %#lx )", SARG1, ARG2);

#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_HVMOP_READ(_hvm_op, _field)             \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
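/* Illustrative note (not part of the upstream source): as with the sched_op
   helper, the short form derives the argument type from the subop name, so
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
   expands to
      PRE_MEM_READ("XEN_HVMOP_set_isa_irq_level isa_irq",
                   (Addr)&((vki_xen_hvm_set_isa_irq_level_t*)arg)->isa_irq,
                   sizeof(((vki_xen_hvm_set_isa_irq_level_t*)arg)->isa_irq));
   The explicit three-argument form is used where several subops share one
   argument structure (e.g. set_param/get_param both use vki_xen_hvm_param). */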

   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);

   case VKI_XEN_HVMOP_get_param:
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);

   case VKI_XEN_HVMOP_set_pci_intx_level:
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domid);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domain);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, bus);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, device);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, level);

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);

   case VKI_XEN_HVMOP_track_dirty_vram: {
      vki_xen_hvm_track_dirty_vram_t *Arg =
         (vki_xen_hvm_track_dirty_vram_t *)ARG2;
      PRE_XEN_HVMOP_READ(track_dirty_vram, domid);
      PRE_XEN_HVMOP_READ(track_dirty_vram, nr);

      PRE_XEN_HVMOP_READ(track_dirty_vram, first_pfn);
      PRE_XEN_HVMOP_READ(track_dirty_vram, dirty_bitmap);

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* if default access */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);

   case VKI_XEN_HVMOP_altp2m: {
      vki_xen_hvm_altp2m_op_t *altp2m_op = (vki_xen_hvm_altp2m_op_t *)arg;

      PRE_XEN_HVMOP_READ(altp2m_op, version);
      PRE_XEN_HVMOP_READ(altp2m_op, cmd);
      PRE_XEN_HVMOP_READ(altp2m_op, domain);
      PRE_XEN_HVMOP_READ(altp2m_op, pad1);
      PRE_XEN_HVMOP_READ(altp2m_op, pad2);

      switch (altp2m_op->cmd) {
      case VKI_XEN_HVMOP_altp2m_get_domain_state:
      case VKI_XEN_HVMOP_altp2m_set_domain_state:
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.domain_state.state), sizeof(vki_uint8_t));

      case VKI_XEN_HVMOP_altp2m_create_p2m:
      case VKI_XEN_HVMOP_altp2m_destroy_p2m:
      case VKI_XEN_HVMOP_altp2m_switch_p2m:
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.view.view), sizeof(vki_uint16_t));
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.view.hvmmem_default_access), sizeof(vki_uint16_t));

      case VKI_XEN_HVMOP_altp2m_change_gfn:
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.view), sizeof(vki_uint16_t));
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.pad1), sizeof(vki_uint16_t));
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.pad2), sizeof(vki_uint32_t));
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.old_gfn), sizeof(vki_uint64_t));
         PRE_MEM_READ("XEN_HVMOP_altp2m_op", (Addr)&(altp2m_op->u.change_gfn.new_gfn), sizeof(vki_uint64_t));

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);

#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   PRINT("__HYPERVISOR_tmem_op ( %u )", tmem->cmd);

   /* Common part for xen_tmem_op:
    *    vki_uint32_t cmd;
    */
   PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));

#define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field)                    \
   PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field,        \
                (Addr)&tmem->u._union._field,                           \
                sizeof(tmem->u._union._field))
#define PRE_XEN_TMEMOP_READ(_tmem, _field) \
   __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)
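/* Illustrative note (not part of the upstream source): for example
      PRE_XEN_TMEMOP_READ(ctrl, subop);
   expands to
      PRE_MEM_READ("XEN_tmem_op_ctrl u.ctrl.subop",
                   (Addr)&tmem->u.ctrl.subop,
                   sizeof(tmem->u.ctrl.subop));
   so the 'ctrl' member of the tmem_op union is checked field by field. */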

   case VKI_XEN_TMEM_control:

      /* Common part for control hypercall:
       *    vki_int32_t pool_id;
       *    vki_uint32_t subop;
       */
      PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                   (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
      PRE_XEN_TMEMOP_READ(ctrl, subop);

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         PRE_XEN_TMEMOP_READ(ctrl, cli_id);
         PRE_XEN_TMEMOP_READ(ctrl, arg1);
         PRE_XEN_TMEMOP_READ(ctrl, buf);

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_tmem_op", ARG1);

#undef PRE_XEN_TMEMOP_READ
#undef __PRE_XEN_TMEMOP_READ

   case VKI_XENMEM_maximum_ram_page:
   case VKI_XENMEM_set_memory_map:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_claim_pages:
   case VKI_XENMEM_maximum_gpfn:
   case VKI_XENMEM_remove_from_physmap:
   case VKI_XENMEM_access_op:

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_populate_physmap: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;

      POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
                     sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);

   case VKI_XENMEM_machphys_mfn_list:
   case VKI_XENMEM_machphys_compat_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
      POST_MEM_WRITE((Addr)arg->extent_start.p,
                     sizeof(vki_xen_pfn_t) * arg->nr_extents);

   case VKI_XENMEM_memory_map:
   case VKI_XENMEM_machine_memory_map: {
      struct vki_xen_memory_map *arg =
         (struct vki_xen_memory_map *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_entries, sizeof(arg->nr_entries));
      POST_MEM_WRITE((Addr)arg->buffer.p,
                     arg->nr_entries * 20 /* size of an e820 entry */);

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      if (arg->space == VKI_XENMAPSPACE_gmfn_range)
         POST_MEM_WRITE(ARG2, sizeof(*arg));

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:

   unsigned int *pdone = (unsigned int *)ARG3;

   POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));

   /* XXX assuming flask, only actual XSM right now */
   struct vki_xen_flask_op *op = (struct vki_xen_flask_op *)ARG1;

   switch (op->interface_version) {

#define POST_XEN_XSM_OP_WRITE(_xsm_op, _union, _field)  \
   POST_MEM_WRITE((Addr)&op->u._union._field,           \
                  sizeof(op->u._union._field))
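/* Illustrative note (not part of the upstream source): this is the output
   counterpart of PRE_XEN_XSM_OP_READ above; e.g.
      POST_XEN_XSM_OP_WRITE(SID_TO_CONTEXT, sid_context, size);
   expands to
      POST_MEM_WRITE((Addr)&op->u.sid_context.size,
                     sizeof(op->u.sid_context.size));
   marking the field as defined now that Xen has filled it in. */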

   case VKI_FLASK_SID_TO_CONTEXT:
      POST_XEN_XSM_OP_WRITE(SID_TO_CONTEXT, sid_context, size);
      POST_MEM_WRITE((Addr)op->u.sid_context.context.p,
                     op->u.sid_context.size);

static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)

   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));

   case VKI_XEN_SCHEDOP_remote_shutdown:

   post_evtchn_op(tid, ARG1, (void *)ARG2, 0);

POST(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);

#define POST_XEN_PHYSDEVOP_WRITE(_op, _field) \
   POST_MEM_WRITE((Addr)&arg->_field, sizeof(arg->_field))
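/* Illustrative note (not part of the upstream source): mirrors
   PRE_XEN_PHYSDEVOP_READ; e.g. POST_XEN_PHYSDEVOP_WRITE("map_pirq", pirq)
   expands to POST_MEM_WRITE((Addr)&arg->pirq, sizeof(arg->pirq)), using the
   'arg' pointer declared in the enclosing case block. */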

   case VKI_XEN_PHYSDEVOP_unmap_pirq:

   case VKI_XEN_PHYSDEVOP_map_pirq: {
      struct vki_xen_physdev_map_pirq *arg =
         (struct vki_xen_physdev_map_pirq *)ARG2;
      if (arg->type == VKI_XEN_MAP_PIRQ_TYPE_MULTI_MSI)
         POST_XEN_PHYSDEVOP_WRITE("map_pirq", entry_nr);
      POST_XEN_PHYSDEVOP_WRITE("map_pirq", pirq);

#undef POST_XEN_PHYSDEVOP_WRITE

   case VKI_XENVER_version:

   case VKI_XENVER_extraversion:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));

   case VKI_XENVER_compile_info:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));

   case VKI_XENVER_capabilities:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));

   case VKI_XENVER_changeset:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));

   case VKI_XENVER_platform_parameters:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));

   case VKI_XENVER_get_features:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));

   case VKI_XENVER_pagesize:

   case VKI_XENVER_guest_handle:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));

   case VKI_XENVER_commandline:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));

POST(grant_table_op)
{
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table *)ARG2;
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)&gst->status, sizeof(gst->status));
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)gst->frame_list.p,
                    sizeof(*gst->frame_list.p) * gst->nr_frames);

   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   switch (sysctl->interface_version)

#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&sysctl->u._union._field,               \
                  sizeof(sysctl->u._union._field))
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
   __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
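/* Illustrative note (not part of the upstream source): the write-side twin of
   PRE_XEN_SYSCTL_READ; e.g.
      POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
   expands to
      POST_MEM_WRITE((Addr)&sysctl->u.sched_id.sched_id,
                     sizeof(sysctl->u.sched_id.sched_id));
   i.e. after a successful hypercall the field Xen wrote is marked defined. */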
1844 switch (sysctl
->cmd
) {
1845 case VKI_XEN_SYSCTL_readconsole
:
1846 POST_MEM_WRITE((Addr
)sysctl
->u
.readconsole
.buffer
.p
,
1847 sysctl
->u
.readconsole
.count
* sizeof(char));
1850 case VKI_XEN_SYSCTL_getdomaininfolist
:
1851 switch (sysctl
->interface_version
)
1854 POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008
, num_domains
);
1855 POST_MEM_WRITE((Addr
)sysctl
->u
.getdomaininfolist_00000008
.buffer
.p
,
1856 sizeof(*sysctl
->u
.getdomaininfolist_00000008
.buffer
.p
)
1857 * sysctl
->u
.getdomaininfolist_00000008
.num_domains
);
1860 POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009
, num_domains
);
1861 POST_MEM_WRITE((Addr
)sysctl
->u
.getdomaininfolist_00000009
.buffer
.p
,
1862 sizeof(*sysctl
->u
.getdomaininfolist_00000009
.buffer
.p
)
1863 * sysctl
->u
.getdomaininfolist_00000009
.num_domains
);
1871 POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a
, num_domains
);
1872 POST_MEM_WRITE((Addr
)sysctl
->u
.getdomaininfolist_0000000a
.buffer
.p
,
1873 sizeof(*sysctl
->u
.getdomaininfolist_0000000a
.buffer
.p
)
1874 * sysctl
->u
.getdomaininfolist_0000000a
.num_domains
);
1879 POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000010
, num_domains
);
1880 POST_MEM_WRITE((Addr
)sysctl
->u
.getdomaininfolist_00000010
.buffer
.p
,
1881 sizeof(*sysctl
->u
.getdomaininfolist_00000010
.buffer
.p
)
1882 * sysctl
->u
.getdomaininfolist_00000010
.num_domains
);
1887 case VKI_XEN_SYSCTL_sched_id
:
1888 POST_XEN_SYSCTL_WRITE(sched_id
, sched_id
);
1891 case VKI_XEN_SYSCTL_cpupool_op
:
1892 if (sysctl
->u
.cpupool_op
.op
== VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE
||
1893 sysctl
->u
.cpupool_op
.op
== VKI_XEN_SYSCTL_CPUPOOL_OP_INFO
)
1894 POST_XEN_SYSCTL_WRITE(cpupool_op
, cpupool_id
);
1895 if (sysctl
->u
.cpupool_op
.op
== VKI_XEN_SYSCTL_CPUPOOL_OP_INFO
) {
1896 POST_XEN_SYSCTL_WRITE(cpupool_op
, sched_id
);
1897 POST_XEN_SYSCTL_WRITE(cpupool_op
, n_dom
);
1899 if (sysctl
->u
.cpupool_op
.op
== VKI_XEN_SYSCTL_CPUPOOL_OP_INFO
||
1900 sysctl
->u
.cpupool_op
.op
== VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO
)
1901 POST_XEN_SYSCTL_WRITE(cpupool_op
, cpumap
);
1904 case VKI_XEN_SYSCTL_physinfo
:
1905 switch (sysctl
->interface_version
)
1908 case 0x00000009: /* Unchanged from version 8 */
1909 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, threads_per_core
);
1910 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, cores_per_socket
);
1911 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, nr_cpus
);
1912 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, max_cpu_id
);
1913 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, nr_nodes
);
1914 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, max_node_id
);
1915 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, cpu_khz
);
1916 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, total_pages
);
1917 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, free_pages
);
1918 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, scrub_pages
);
1919 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, hw_cap
[8]);
1920 POST_XEN_SYSCTL_WRITE(physinfo_00000008
, capabilities
);
1928 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, threads_per_core
);
1929 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, cores_per_socket
);
1930 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, nr_cpus
);
1931 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, max_cpu_id
);
1932 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, nr_nodes
);
1933 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, max_node_id
);
1934 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, cpu_khz
);
1935 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, total_pages
);
1936 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, free_pages
);
1937 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, scrub_pages
);
1938 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, outstanding_pages
);
1939 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, hw_cap
[8]);
1940 POST_XEN_SYSCTL_WRITE(physinfo_0000000a
, capabilities
);
1945 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, threads_per_core
);
1946 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, cores_per_socket
);
1947 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, nr_cpus
);
1948 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, max_cpu_id
);
1949 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, nr_nodes
);
1950 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, max_node_id
);
1951 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, cpu_khz
);
1952 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, capabilities
);
1953 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, total_pages
);
1954 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, free_pages
);
1955 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, scrub_pages
);
1956 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, outstanding_pages
);
1957 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, max_mfn
);
1958 POST_XEN_SYSCTL_WRITE(physinfo_00000010
, hw_cap
[8]);
1962 case VKI_XEN_SYSCTL_topologyinfo
:
1963 POST_XEN_SYSCTL_WRITE(topologyinfo
, max_cpu_index
);
1964 if (sysctl
->u
.topologyinfo
.cpu_to_core
.p
)
1965 POST_MEM_WRITE((Addr
)sysctl
->u
.topologyinfo
.cpu_to_core
.p
,
1966 sizeof(uint32_t) * sysctl
->u
.topologyinfo
.max_cpu_index
);
1967 if (sysctl
->u
.topologyinfo
.cpu_to_socket
.p
)
1968 POST_MEM_WRITE((Addr
)sysctl
->u
.topologyinfo
.cpu_to_socket
.p
,
1969 sizeof(uint32_t) * sysctl
->u
.topologyinfo
.max_cpu_index
);
1970 if (sysctl
->u
.topologyinfo
.cpu_to_node
.p
)
1971 POST_MEM_WRITE((Addr
)sysctl
->u
.topologyinfo
.cpu_to_node
.p
,
1972 sizeof(uint32_t) * sysctl
->u
.topologyinfo
.max_cpu_index
);
   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
      break;
   case VKI_XEN_SYSCTL_debug_keys:
      break;
   }
#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
}
POST(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   switch (domctl->interface_version) {
   /* (interface_version gate; accepted-version labels not recovered) */
   }

#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,               \
                  sizeof(domctl->u._union._field));
#define POST_XEN_DOMCTL_WRITE(_domctl, _field)          \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)
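/* For reference, POST_XEN_DOMCTL_WRITE(max_vcpus, max) below expands (via
   __POST_XEN_DOMCTL_WRITE(max_vcpus, max_vcpus, max)) to roughly
      POST_MEM_WRITE((Addr)&domctl->u.max_vcpus.max,
                     sizeof(domctl->u.max_vcpus.max));
   i.e. it tells the tool that the hypervisor wrote that output field of the
   domctl union. */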
   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_setvcpuextstate:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_test_assign_device:
   case VKI_XEN_DOMCTL_assign_device:
   case VKI_XEN_DOMCTL_deassign_device:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_irq_permission:
   case VKI_XEN_DOMCTL_iomem_permission:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_pin_mem_cacheattr:
   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_vcpu_msrs:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */
      break;

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
      break;
   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
      break;
   case VKI_XEN_DOMCTL_gettscinfo:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_00000007, out_info);
         POST_MEM_WRITE((Addr)domctl->u.tsc_info_00000007.out_info.p,
                        sizeof(vki_xen_guest_tsc_info_t));
         break;
      default:
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, tsc_mode);
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, gtsc_khz);
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, incarnation);
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, elapsed_nsec);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
      break;
   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally writes size... */
      __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
      /* ...but only writes to the buffer if it was non NULL */
      if ( domctl->u.hvmcontext.buffer.p )
         POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                        sizeof(*domctl->u.hvmcontext.buffer.p)
                        * domctl->u.hvmcontext.size);
      break;
   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      switch (domctl->u.hvmcontext_partial_00000007.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial_00000007.buffer.p )
            POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial_00000007.buffer.p,
                           VKI_HVM_SAVE_LENGTH(CPU));
         break;
      }
      break;
   case VKI_XEN_DOMCTL_scheduler_op:
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         case VKI_XEN_SCHEDULER_RTDS:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.budget);
            break;
         }
      }
      break;
   case VKI_XEN_DOMCTL_getvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpuaffinity: /* Writes back actual result */
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                        domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
         break;
      default:
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;
   case VKI_XEN_DOMCTL_getdomaininfo:
      switch (domctl->interface_version) {
      case 0x00000007:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
         break;
      case 0x00000008:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
         break;
      case 0x00000009:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
      break;
   case VKI_XEN_DOMCTL_getpageframeinfo3:
      POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                     domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;
   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)
      {
      case 0x00000007:
      case 0x00000008:
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008, size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_disables_events);
         /* (one further ext_vcpucontext_00000008 field write; field name not
            recovered) */
#endif
         break;

      case 0x00000009:
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_disables_events);
         /* (three further ext_vcpucontext_00000009 field writes; field names
            not recovered) */
#endif
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getvcpuextstate:
      if (domctl->u.vcpuextstate.buffer.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                        domctl->u.vcpuextstate.size);
      break;
   case VKI_XEN_DOMCTL_shadow_op:
      switch(domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
      case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         POST_XEN_DOMCTL_WRITE(shadow_op, pages);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
         if(domctl->u.shadow_op.dirty_bitmap.p)
            POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                           domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
         POST_XEN_DOMCTL_WRITE(shadow_op, mb);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_get_vcpu_msrs:
      if (domctl->u.vcpu_msrs.msrs.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpu_msrs.msrs.p,
                        sizeof(vki_xen_domctl_vcpu_msr_t) *
                        domctl->u.vcpu_msrs.msr_count);
      break;
   case VKI_XEN_DOMCTL_mem_event_op:
   //case VKI_XEN_DOMCTL_vm_event_op: /* name change in 4.6 */
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
         __POST_XEN_DOMCTL_WRITE(mem_event_op, mem_event_op_00000007, port);
         break;
      case 0x0000000b:
         __POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_0000000b, port);
         break;
      default:
         __POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_00000012, u.enable.port);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_monitor_op:
      switch (domctl->interface_version) {
      case 0x0000000b:
         if (domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
            switch(domctl->u.monitor_op_0000000b.event) {
            case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000b, u.mov_to_cr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000b, u.mov_to_msr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000b, u.guest_request);
               break;
            }
         }
         break;
      default:
         if (domctl->u.monitor_op_00000011.op == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
            switch(domctl->u.monitor_op_00000011.event) {
            case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.mov_to_cr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.mov_to_msr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_00000011, u.guest_request);
               break;
            }
         }
         break;
      }
      break;
   }
#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
}
POST(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field)  \
   POST_MEM_WRITE((Addr)&((_type*)arg)->_field,         \
                  sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field)                           \
   __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
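/* For reference, POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access) below
   expands to roughly
      POST_MEM_WRITE((Addr)&((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access,
                     sizeof(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access))
   i.e. only the output field of the guest-supplied argument buffer is marked
   as written by the hypervisor. */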
   switch (op) {
   case VKI_XEN_HVMOP_set_param:
   case VKI_XEN_HVMOP_set_pci_intx_level:
   case VKI_XEN_HVMOP_set_isa_irq_level:
   case VKI_XEN_HVMOP_set_pci_link_route:
   case VKI_XEN_HVMOP_set_mem_type:
   case VKI_XEN_HVMOP_set_mem_access:
   case VKI_XEN_HVMOP_inject_trap:
      /* No output parameters */
      break;

   case VKI_XEN_HVMOP_get_param:
      __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
      break;
   }
#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
}
POST(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   switch (tmem->cmd) {

   case VKI_XEN_TMEM_control:

      switch(tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         break;
      }
      break;
   }
}

typedef
   struct {
      SyscallTableEntry entry;
      int nr_args;
   }
   XenHypercallTableEntry;
#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
#define HYPXY(const, name, nr_args)                     \
   [const] = { { vgSysWrap_xen_##name##_before,         \
                 vgSysWrap_xen_##name##_after },        \
               nr_args }
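/* For reference, HYPXY(__VKI_XEN_domctl, domctl, 1) produces a designated
   initializer equivalent to
      [__VKI_XEN_domctl] = { { vgSysWrap_xen_domctl_before,
                               vgSysWrap_xen_domctl_after }, 1 }
   whereas HYPX_ installs a PRE wrapper only and leaves the POST slot NULL. */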
static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10

   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret, iret                                      // 23
   //    __VKI_XEN_vcpu_op, vcpu_op                                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   HYPXY(__VKI_XEN_xsm_op,                  xsm_op,            1), // 27
   //    __VKI_XEN_nmi_op                                          // 28
   HYPXY(__VKI_XEN_sched_op,                sched_op,          2), // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   HYPXY(__VKI_XEN_physdev_op,              physdev_op,        2), // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};
static void bad_before ( ThreadId tid,
                         SyscallArgLayout* layout,
                         /*MOD*/SyscallArgs* args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord* flags )
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
             VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

static XenHypercallTableEntry bad_hyper =
{ { bad_before, NULL }, 0 };
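/* Any hypercall number outside hypercall_table, and any table slot without a
   PRE wrapper, resolves to bad_hyper: bad_before reports the unhandled
   hypercall and fails it with VKI_ENOSYS, so unknown hypercalls are reported
   rather than silently passed through unchecked. */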
static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
{
   XenHypercallTableEntry *ret = &bad_hyper;

   const UInt hypercall_table_size
      = sizeof(hypercall_table) / sizeof(hypercall_table[0]);

   /* Is it in the contiguous initial section of the table? */
   if (sysno < hypercall_table_size) {
      XenHypercallTableEntry* ent = &hypercall_table[sysno];
      if (ent->entry.before != NULL)
         ret = ent;
   }

   /* Can't find a wrapper */
   return ret;
}
DEFN_PRE_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   vg_assert(ent->entry.before);
   (ent->entry.before)( tid, layout, arrghs, status, flags );
}
DEFN_POST_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   if (ent->entry.after)
      (ent->entry.after)( tid, arrghs, status );
}

#endif // defined(ENABLE_XEN)