/*--------------------------------------------------------------------*/
/*--- Xen Hypercalls                                 syswrap-xen.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2012 Citrix Systems
      ian.campbell@citrix.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics.h"
#include "pub_core_vki.h"

#if defined(ENABLE_XEN)

#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-xen.h"
#define PRE(name)       static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name)      static DEFN_POST_TEMPLATE(xen, name)
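/* Note (descriptive, not from the original file): DEFN_PRE_TEMPLATE and
   DEFN_POST_TEMPLATE come from the generic syswrap headers included above and
   expand to the standard pre/post syscall-handler signatures, which is why the
   handler bodies below can refer to tid, layout, arrghs, status, flags and the
   ARGn/SARGn argument macros without declaring them. */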
static void bad_intf_version ( ThreadId tid,
                               SyscallArgLayout* layout,
                               /*MOD*/SyscallArgs* args,
                               /*OUT*/SyscallStatus* status,
                               /*OUT*/UWord* flags,
                               const HChar* hypercall,
                               UWord version)
{
   VG_(dmsg)("WARNING: %s version %#lx not supported\n",
             hypercall, version);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
static void bad_subop ( ThreadId tid,
                        SyscallArgLayout* layout,
                        /*MOD*/SyscallArgs* args,
                        /*OUT*/SyscallStatus* status,
                        /*OUT*/UWord* flags,
                        const HChar* hypercall,
                        UWord subop)
{
   VG_(dmsg)("WARNING: unhandled %s subop: %lu\n",
             hypercall, subop);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
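/* Descriptive note (not in the original source): both helpers above are the
   common fall-through path for the hypercall handlers in this file.  They log
   a warning about the unsupported interface version or subop, optionally dump
   a stack trace, and fail the call with VKI_ENOSYS. */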
PRE(memory_op)
{
   PRINT("__HYPERVISOR_memory_op ( %lu, %#lx )", ARG1, ARG2);

   switch (ARG1) {

   case VKI_XENMEM_maximum_ram_page:
      /* No inputs */
      break;

   case VKI_XENMEM_maximum_gpfn:
      PRE_MEM_READ("XENMEM_maximum_gpfn domid",
                   (Addr)ARG2, sizeof(vki_xen_domid_t));
      break;

   case VKI_XENMEM_machphys_mfn_list:
   case VKI_XENMEM_machphys_compat_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
                   (Addr)&arg->max_extents, sizeof(arg->max_extents));
      PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
                   (Addr)&arg->extent_start, sizeof(arg->extent_start));
      break;
   }

   case VKI_XENMEM_set_memory_map: {
      struct vki_xen_foreign_memory_map *arg =
         (struct vki_xen_foreign_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_set_memory_map domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_set_memory_map map",
                   (Addr)&arg->map, sizeof(arg->map));
      break;
   }

   case VKI_XENMEM_memory_map:
   case VKI_XENMEM_machine_memory_map: {
      struct vki_xen_memory_map *arg =
         (struct vki_xen_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_memory_map nr_entries",
                   (Addr)&arg->nr_entries, sizeof(arg->nr_entries));
      break;
   }

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_populate_physmap:
   case VKI_XENMEM_claim_pages: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;
      const HChar *which;

      switch (ARG1) {
      case VKI_XENMEM_increase_reservation:
         which = "XENMEM_increase_reservation";
         break;
      case VKI_XENMEM_decrease_reservation:
         which = "XENMEM_decrease_reservation";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_populate_physmap:
         which = "XENMEM_populate_physmap";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_claim_pages:
         which = "XENMEM_claim_pages";
         break;
      default:
         which = "XENMEM_unknown";
         break;
      }

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_start,
                   sizeof(memory_reservation->extent_start));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->nr_extents,
                   sizeof(memory_reservation->nr_extents));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_order,
                   sizeof(memory_reservation->extent_order));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->mem_flags,
                   sizeof(memory_reservation->mem_flags));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->domid,
                   sizeof(memory_reservation->domid));
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_add_to_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_add_to_physmap size",
                   (Addr)&arg->size, sizeof(arg->size));
      PRE_MEM_READ("XENMEM_add_to_physmap space",
                   (Addr)&arg->space, sizeof(arg->space));
      PRE_MEM_READ("XENMEM_add_to_physmap idx",
                   (Addr)&arg->idx, sizeof(arg->idx));
      PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_remove_from_physmap: {
      struct vki_xen_remove_from_physmap *arg =
         (struct vki_xen_remove_from_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_remove_from_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      /* No inputs */
      break;

   case VKI_XENMEM_access_op: {
      struct vki_xen_mem_event_op *arg =
         (struct vki_xen_mem_event_op *)ARG2;
      PRE_MEM_READ("XENMEM_access_op domid",
                   (Addr)&arg->domain, sizeof(arg->domain));
      PRE_MEM_READ("XENMEM_access_op op",
                   (Addr)&arg->op, sizeof(arg->op));
      PRE_MEM_READ("XENMEM_access_op gfn",
                   (Addr)&arg->gfn, sizeof(arg->gfn));
      break;
   }

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_memory_op", ARG1);
      break;
   }
}
PRE(mmuext_op)
{
   PRINT("__HYPERVISOR_mmuext_op ( %#lx, %ld, %#lx, %lu )",
         ARG1, SARG2, ARG3, ARG4);

   struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
   unsigned int i, nr = ARG2;

   for (i = 0; i < nr; i++) {
      struct vki_xen_mmuext_op *op = ops + i;
      PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
                   (Addr)&op->cmd, sizeof(op->cmd));

      switch (op->cmd) {
      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_COPY_PAGE:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.mfn,
                      sizeof(op->arg1.mfn));
         break;

      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.linear_addr,
                      sizeof(op->arg1.linear_addr));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
         break;
      }

      switch (op->cmd) {
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
                      (Addr)&op->arg2.nr_ents,
                      sizeof(op->arg2.nr_ents));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
                      (Addr)&op->arg2.vcpumask,
                      sizeof(op->arg2.vcpumask));
         break;

      case VKI_XEN_MMUEXT_COPY_PAGE:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
                      (Addr)&op->arg2.src_mfn,
                      sizeof(op->arg2.src_mfn));
         break;

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         break;
      }
   }
}
PRE(xsm_op)
{
   /* XXX assuming flask, only actual XSM right now */
   struct vki_xen_flask_op *op = (struct vki_xen_flask_op *)ARG1;

   PRINT("__HYPERVISOR_xsm_op ( %u )", op->cmd);

   /*
    * Common part of xen_flask_op:
    *    vki_uint32_t cmd;
    *    vki_uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_xsm_op", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   if (!op)
      return;

   switch (op->interface_version) {
   case 0x00000001:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_xsm_op", op->interface_version);
      return;
   }

#define PRE_XEN_XSM_OP_READ(_xsm_op, _union, _field)            \
   PRE_MEM_READ("FLASK_" #_xsm_op " u." #_union "." #_field,    \
                (Addr)&op->u._union._field,                     \
                sizeof(op->u._union._field))

   switch (op->cmd) {
   case VKI_FLASK_SID_TO_CONTEXT:
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, sid);
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, size);
      PRE_XEN_XSM_OP_READ(SID_TO_CONTEXT, sid_context, context.p);
      break;
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xsm_op", op->cmd);
      break;
   }
#undef __PRE_XEN_XSM_OP_READ
#undef PRE_XEN_XSM_OP_READ
}
PRE(sched_op)
{
   PRINT("__HYPERVISOR_sched_op ( %ld, %#lx )", SARG1, ARG2);
   void *arg = (void *)(unsigned long)ARG2;

#define __PRE_XEN_SCHEDOP_READ(_schedop, _type, _field)         \
   PRE_MEM_READ("XEN_SCHEDOP_" # _schedop " " #_field,          \
                (Addr)&((_type*)arg)->_field,                   \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_SCHEDOP_READ(_schedop, _field) \
   __PRE_XEN_SCHEDOP_READ(_schedop, vki_xen_ ## _schedop ## _t, _field)

   switch (ARG1) {
   case VKI_XEN_SCHEDOP_remote_shutdown:
      PRE_XEN_SCHEDOP_READ(remote_shutdown, domain_id);
      PRE_XEN_SCHEDOP_READ(remote_shutdown, reason);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sched_op", ARG1);
      break;
   }
#undef __PRE_XEN_SCHEDOP_READ
#undef PRE_XEN_SCHEDOP_READ
}
static void pre_evtchn_op(ThreadId tid,
                          SyscallArgLayout* layout,
                          /*MOD*/SyscallArgs* arrghs,
                          /*OUT*/SyscallStatus* status,
                          /*OUT*/UWord* flags,
                          __vki_u32 cmd, void *arg, int compat)
{
   PRINT("__HYPERVISOR_event_channel_op%s ( %u, %p )",
         compat ? "_compat" : "", cmd, arg);

   switch (cmd) {
   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
                   (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
      PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
                   (Addr)&alloc_unbound->remote_dom,
                   sizeof(alloc_unbound->remote_dom));
      break;
   }
   default:
      if ( compat )
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op_compat", cmd);
      else
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op", cmd);
      break;
   }
}

PRE(evtchn_op)
{
   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 ARG1, (void *)ARG2, 0);
}

PRE(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
                ARG1, sizeof(*evtchn));

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 evtchn->cmd, &evtchn->u, 1);
}
PRE(physdev_op)
{
   int cmd = ARG1;

   PRINT("__HYPERVISOR_physdev_op ( %ld, %#lx )", SARG1, ARG2);

#define PRE_XEN_PHYSDEVOP_READ(_op, _field)             \
   PRE_MEM_READ("XEN_PHYSDEVOP_" #_op " ." #_field,     \
                (Addr)&arg->_field,                     \
                sizeof(arg->_field))

   switch (cmd) {
   case VKI_XEN_PHYSDEVOP_map_pirq: {
      struct vki_xen_physdev_map_pirq *arg =
         (struct vki_xen_physdev_map_pirq *)ARG2;

      PRE_XEN_PHYSDEVOP_READ("map_pirq", domid);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", type);

      PRE_XEN_PHYSDEVOP_READ("map_pirq", bus);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", devfn);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", entry_nr);
      PRE_XEN_PHYSDEVOP_READ("map_pirq", table_base);

      switch (arg->type) {
      case VKI_XEN_MAP_PIRQ_TYPE_MSI:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);
         break;
      case VKI_XEN_MAP_PIRQ_TYPE_GSI:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);
         PRE_XEN_PHYSDEVOP_READ("map_pirq", pirq);
         break;
      case VKI_XEN_MAP_PIRQ_TYPE_MSI_SEG:
         PRE_XEN_PHYSDEVOP_READ("map_pirq", index);
         break;
      case VKI_XEN_MAP_PIRQ_TYPE_MULTI_MSI:
         break;
      }
      break;
   }
   case VKI_XEN_PHYSDEVOP_unmap_pirq: {
      struct vki_xen_physdev_unmap_pirq *arg =
         (struct vki_xen_physdev_unmap_pirq *)ARG2;
      PRE_XEN_PHYSDEVOP_READ("unmap_pirq", domid);
      PRE_XEN_PHYSDEVOP_READ("unmap_pirq", pirq);
      break;
   }
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_physdev_op", cmd);
      break;
   }
#undef PRE_XEN_PHYSDEVOP_READ
}
PRE(xen_version)
{
   PRINT("__HYPERVISOR_xen_version ( %ld, %#lx )", SARG1, ARG2);

   switch (ARG1) {
   case VKI_XENVER_version:
   case VKI_XENVER_extraversion:
   case VKI_XENVER_compile_info:
   case VKI_XENVER_capabilities:
   case VKI_XENVER_changeset:
   case VKI_XENVER_platform_parameters:
   case VKI_XENVER_get_features:
   case VKI_XENVER_pagesize:
   case VKI_XENVER_guest_handle:
   case VKI_XENVER_commandline:
      /* No inputs */
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xen_version", ARG1);
      break;
   }
}
PRE(grant_table_op)
{
   PRINT("__HYPERVISOR_grant_table_op ( %lu, %#lx, %lu )", ARG1, ARG2, ARG3);
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
                   (Addr)&gst->dom, sizeof(gst->dom));
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
                   (Addr)&gst->nr_frames, sizeof(gst->nr_frames));
      break;
   }
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_grant_table_op", ARG1);
      break;
   }
}
PRE(sysctl) {
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   PRINT("__HYPERVISOR_sysctl ( %u )", sysctl->cmd);

   /*
    * Common part of xen_sysctl:
    *    uint32_t cmd;
    *    uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   if (!sysctl)
      return;

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_sysctl", sysctl->interface_version);
      return;
   }

#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field)                 \
   PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field,      \
                (Addr)&sysctl->u._union._field,                        \
                sizeof(sysctl->u._union._field))
#define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
   __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      /* These are all unconditionally read */
      PRE_XEN_SYSCTL_READ(readconsole, clear);
      PRE_XEN_SYSCTL_READ(readconsole, incremental);
      PRE_XEN_SYSCTL_READ(readconsole, buffer);
      PRE_XEN_SYSCTL_READ(readconsole, count);

      /* 'index' only read if 'incremental' is nonzero */
      if (sysctl->u.readconsole.incremental)
         PRE_XEN_SYSCTL_READ(readconsole, index);
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
         break;
      case 0x00000009:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
         break;
      case 0x0000000a:
      case 0x0000000b:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
         break;
      default:
         VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
                   "%"PRIx32" not implemented yet\n",
                   sysctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);
         return;
      }
      break;

   case VKI_XEN_SYSCTL_debug_keys:
      PRE_XEN_SYSCTL_READ(debug_keys, keys);
      PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
      PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
                   (Addr)sysctl->u.debug_keys.keys.p,
                   sysctl->u.debug_keys.nr_keys * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_sched_id:
      /* No inputs */
      break;

   case VKI_XEN_SYSCTL_cpupool_op:
      PRE_XEN_SYSCTL_READ(cpupool_op, op);

      switch (sysctl->u.cpupool_op.op) {
      case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
         PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
      }

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
         PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
         PRE_XEN_SYSCTL_READ(cpupool_op, domid);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
         PRE_XEN_SYSCTL_READ(cpupool_op, cpu);

      break;

   case VKI_XEN_SYSCTL_physinfo:
      /* No input params */
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sysctl", sysctl->cmd);
      break;
   }
#undef PRE_XEN_SYSCTL_READ
#undef __PRE_XEN_SYSCTL_READ
}
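/* Descriptive note (not in the original source): domctl is the most
   version-sensitive hypercall handled here.  The layout of the
   struct vki_xen_domctl union changed across Xen releases, so the handler
   below first validates the common header (cmd, interface_version, domain)
   and then dispatches on both cmd and interface_version. */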
PRE(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   PRINT("__HYPERVISOR_domctl ( %u ) on dom%d", domctl->cmd, domctl->domain);

   /*
    * Common part of xen_domctl:
    *    vki_uint32_t cmd;
    *    vki_uint32_t interface_version;
    *    vki_xen_domid_t domain;
    */
   PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
                + sizeof(vki_xen_domid_t));

   if (!domctl)
      return;

   switch (domctl->interface_version)
   {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_domctl", domctl->interface_version);
      return;
   }

#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field)                 \
   PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field,      \
                (Addr)&domctl->u._union._field,                        \
                sizeof(domctl->u._union._field))
#define PRE_XEN_DOMCTL_READ(_domctl, _field) \
   __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_vcpus:
   case VKI_XEN_DOMCTL_get_address_size:
   case VKI_XEN_DOMCTL_gettscinfo:
   case VKI_XEN_DOMCTL_getdomaininfo:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_resumedomain:
      /* No input fields. */
      break;

   case VKI_XEN_DOMCTL_createdomain:
      PRE_XEN_DOMCTL_READ(createdomain, ssidref);
      PRE_XEN_DOMCTL_READ(createdomain, handle);
      PRE_XEN_DOMCTL_READ(createdomain, flags);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally reads the 'buffer' pointer */
      __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
      /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
       * buffer is a request for the required size. */
      if ( domctl->u.hvmcontext.buffer.p )
         __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
      break;

   case VKI_XEN_DOMCTL_sethvmcontext:
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
      PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
                   (Addr)domctl->u.hvmcontext.buffer.p,
                   domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);

      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                          (Addr)domctl->u.hvmcontext_partial.buffer.p,
                          VKI_HVM_SAVE_LENGTH(CPU));
         break;
      case VKI_HVM_SAVE_CODE(MTRR):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                          (Addr)domctl->u.hvmcontext_partial.buffer.p,
                          VKI_HVM_SAVE_LENGTH(MTRR));
         break;
      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl_gethvmcontext_partial type",
                   domctl->u.hvmcontext_partial.type);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_max_mem:
      PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
      break;

   case VKI_XEN_DOMCTL_set_address_size:
      __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_test_assign_device:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
      case 0x00000008:
      case 0x00000009:
      case 0x0000000a:
         __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_00000007,
                               machine_sbdf);
         break;
      case 0x0000000b:
         __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, dev);
         __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b, flag);
         switch (domctl->u.assign_device_0000000b.dev) {
         case VKI_XEN_DOMCTL_DEV_PCI:
            __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b,
                                  u.pci);
            break;
         case VKI_XEN_DOMCTL_DEV_DT:
            __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device_0000000b,
                                  u.dt);
            PRE_MEM_READ("XEN_DOMTCL_test_assign_device.dt",
                         (Addr)domctl->u.assign_device_0000000b.u.dt.path.p,
                         domctl->u.assign_device_0000000b.u.dt.size);
            break;
         default:
            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl_test_assign_device dev",
                      domctl->u.assign_device_0000000b.dev);
            break;
         }
         break;
      }
      break;
   case VKI_XEN_DOMCTL_assign_device:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
      case 0x00000008:
      case 0x00000009:
      case 0x0000000a:
         __PRE_XEN_DOMCTL_READ(assign_device, assign_device_00000007,
                               machine_sbdf);
         break;
      case 0x0000000b:
         __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, dev);
         __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, flag);
         switch (domctl->u.assign_device_0000000b.dev) {
         case VKI_XEN_DOMCTL_DEV_PCI:
            __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, u.pci);
            break;
         case VKI_XEN_DOMCTL_DEV_DT:
            __PRE_XEN_DOMCTL_READ(assign_device, assign_device_0000000b, u.dt);
            PRE_MEM_READ("XEN_DOMTCL_assign_device.dt",
                         (Addr)domctl->u.assign_device_0000000b.u.dt.path.p,
                         domctl->u.assign_device_0000000b.u.dt.size);
            break;
         default:
            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl_assign_device dev",
                      domctl->u.assign_device_0000000b.dev);
            break;
         }
         break;
      }
      break;

   case VKI_XEN_DOMCTL_deassign_device:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
      case 0x00000008:
      case 0x00000009:
      case 0x0000000a:
         __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_00000007,
                               machine_sbdf);
         break;
      case 0x0000000b:
         __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, dev);
         __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b, flag);
         switch (domctl->u.assign_device_0000000b.dev) {
         case VKI_XEN_DOMCTL_DEV_PCI:
            __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b,
                                  u.pci);
            break;
         case VKI_XEN_DOMCTL_DEV_DT:
            __PRE_XEN_DOMCTL_READ(deassign_device, assign_device_0000000b,
                                  u.dt);
            PRE_MEM_READ("XEN_DOMTCL_assign_device.dt",
                         (Addr)domctl->u.assign_device_0000000b.u.dt.path.p,
                         domctl->u.assign_device_0000000b.u.dt.size);
            break;
         default:
            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl_deassign_device dev",
                      domctl->u.assign_device_0000000b.dev);
            break;
         }
         break;
      }
      break;
   case VKI_XEN_DOMCTL_settscinfo:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
      case 0x00000008:
      case 0x00000009:
      case 0x0000000a:
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.tsc_mode);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.gtsc_khz);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.incarnation);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_00000007, info.elapsed_nsec);
         break;
      case 0x0000000b:
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, tsc_mode);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, gtsc_khz);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, incarnation);
         __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info_0000000b, elapsed_nsec);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_irq_permission:
      PRE_XEN_DOMCTL_READ(irq_permission, pirq);
      PRE_XEN_DOMCTL_READ(irq_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_iomem_permission:
      PRE_XEN_DOMCTL_READ(iomem_permission, first_mfn);
      PRE_XEN_DOMCTL_READ(iomem_permission, nr_mfns);
      PRE_XEN_DOMCTL_READ(iomem_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_ioport_permission:
      PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
      PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
      PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_hypercall_init:
      PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
      break;

   case VKI_XEN_DOMCTL_settimeoffset:
      PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
      PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
         switch (domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_RTDS:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.budget);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         }
      }
      break;
   case VKI_XEN_DOMCTL_getvcpuaffinity:
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009,
                               cpumap.nr_bits);
         break;
      default:
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_setvcpuaffinity:
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009,
                               cpumap.nr_bits);
         PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
                      (Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                      domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
         break;
      default:
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_hard.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         }
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_soft.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
         }
         break;
      }
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      break;
   case VKI_XEN_DOMCTL_setnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;
   case VKI_XEN_DOMCTL_getvcpucontext:
      __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
      break;

   case VKI_XEN_DOMCTL_setvcpucontext:
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_pin_mem_cacheattr:
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, start);
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, end);
      PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, type);
      break;

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)
      {
      case 0x00000007:
      case 0x00000008:
         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000008,
                               vcpu);
         break;

      case 0x00000009:
         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009,
                               vcpu);
         break;

      default:
         VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_get_ext_vcpucontext domctl version %#"
                   PRIx32" not implemented\n", domctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
      switch (domctl->interface_version)
      {
      case 0x00000007:
      case 0x00000008:
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               vcpu);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               size);
#if defined(__i386__) || defined(__x86_64__)
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_disables_events);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_disables_events);

         if ( domctl->u.ext_vcpucontext_00000008.size >=
              offsetof(struct vki_xen_domctl_ext_vcpucontext_00000008, mcg_cap) )
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                  mcg_cap);
#endif
         break;

      case 0x00000009:
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               vcpu);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               size);
#if defined(__i386__) || defined(__x86_64__)
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_disables_events);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_disables_events);

         if ( domctl->u.ext_vcpucontext_00000009.size >=
              offsetof(struct vki_xen_domctl_ext_vcpucontext_00000009, caps) )
         {
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                  caps);
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                  mci_ctl2_bank0);
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                  mci_ctl2_bank1);
         }
#endif
         break;

      default:
         VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_set_ext_vcpucontext domctl version %#"
                   PRIx32" not implemented\n", domctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_set_cpuid:
      PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
                   (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
      PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
                   (Addr)domctl->u.getpageframeinfo3.array.p,
                   domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;

   case VKI_XEN_DOMCTL_setvcpuextstate:
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, buffer);
      PRE_MEM_READ("XEN_DOMCTL_setvcpuextstate *u.vcpuextstate.buffer.p",
                   (Addr)domctl->u.vcpuextstate.buffer.p,
                   domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);
      break;
   case VKI_XEN_DOMCTL_shadow_op:
      PRE_XEN_DOMCTL_READ(shadow_op, op);

      switch (domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
      case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
         /* No further inputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
         PRE_XEN_DOMCTL_READ(shadow_op, mode);
         switch (domctl->u.shadow_op.mode)
         {
         case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
            goto domctl_shadow_op_enable_logdirty;

         default:
            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl shadowop mode",
                      domctl->u.shadow_op.mode);
            break;
         }

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
      domctl_shadow_op_enable_logdirty:
         /* No further inputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
         PRE_XEN_DOMCTL_READ(shadow_op, pages);
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
         PRE_XEN_DOMCTL_READ(shadow_op, mb);
         break;

      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl shadow(10)",
                   domctl->u.shadow_op.op);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_set_max_evtchn:
      PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);
      break;

   case VKI_XEN_DOMCTL_cacheflush:
      PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
      PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
      break;

   case VKI_XEN_DOMCTL_set_access_required:
      PRE_XEN_DOMCTL_READ(access_required, access_required);
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
   //case VKI_XEN_DOMCTL_vm_event_op: /* name change in 4.6 */
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
      case 0x00000008:
      case 0x00000009:
      case 0x0000000a:
         __PRE_XEN_DOMCTL_READ(mem_event_op, mem_event_op_00000007, op);
         __PRE_XEN_DOMCTL_READ(mem_event_op, mem_event_op_00000007, mode);
         break;
      case 0x0000000b:
         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, op);
         __PRE_XEN_DOMCTL_READ(vm_event_op, vm_event_op_0000000b, mode);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_debug_op:
      PRE_XEN_DOMCTL_READ(debug_op, op);
      PRE_XEN_DOMCTL_READ(debug_op, vcpu);
      break;

   case VKI_XEN_DOMCTL_get_vcpu_msrs:
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, vcpu);
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msr_count);
      __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msrs);
      break;

   case VKI_XEN_DOMCTL_set_vcpu_msrs:
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, vcpu);
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msr_count);
      __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msrs);
      PRE_MEM_READ("XEN_DOMCTL_set_vcpu_msrs *u.vcpu_msrs.msrs.p",
                   (Addr)domctl->u.vcpu_msrs.msrs.p,
                   sizeof(vki_xen_domctl_vcpu_msr_t) *
                   domctl->u.vcpu_msrs.msr_count);
      break;

   case VKI_XEN_DOMCTL_monitor_op:
      switch (domctl->interface_version) {
      case 0x0000000b:
         if (domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_ENABLE ||
             domctl->u.monitor_op_0000000b.op == VKI_XEN_DOMCTL_MONITOR_OP_DISABLE) {
            switch (domctl->u.monitor_op_0000000b.event) {
            case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_0000000b, u.mov_to_cr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_0000000b, u.mov_to_msr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
               __PRE_XEN_DOMCTL_READ(monitor_op, monitor_op_0000000b, u.guest_request);
               break;
            case VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES:
               break;
            }
         }
         break;
      }
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_domctl", domctl->cmd);
      break;
   }
#undef PRE_XEN_DOMCTL_READ
#undef __PRE_XEN_DOMCTL_READ
}
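/* Descriptive note (not in the original source): HVMOPs pass a single guest
   pointer whose layout depends on the op, so the handler below casts 'arg'
   per-op via the PRE_XEN_HVMOP_READ macros and marks each field Xen reads. */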
PRE(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %#lx )", SARG1, ARG2);

#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_HVMOP_READ(_hvm_op, _field)                             \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_param:
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
      break;

   case VKI_XEN_HVMOP_set_pci_intx_level:
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domid);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domain);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, bus);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, device);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, level);
      break;

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);
      break;

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);
      break;

   case VKI_XEN_HVMOP_track_dirty_vram: {
      vki_xen_hvm_track_dirty_vram_t *Arg =
         (vki_xen_hvm_track_dirty_vram_t*)ARG2;
      PRE_XEN_HVMOP_READ(track_dirty_vram, domid);
      PRE_XEN_HVMOP_READ(track_dirty_vram, nr);
      if ( Arg->nr ) {
         PRE_XEN_HVMOP_READ(track_dirty_vram, first_pfn);
         PRE_XEN_HVMOP_READ(track_dirty_vram, dirty_bitmap);
      }
      break;
   }

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
      break;

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* if default access */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));
      break;

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);
      break;
   }
#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
}
PRE(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   PRINT("__HYPERVISOR_tmem_op ( %u )", tmem->cmd);

   /* Common part for xen_tmem_op:
    *    vki_uint32_t cmd;
    */
   PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));

#define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field)                    \
   PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field,        \
                (Addr)&tmem->u._union._field,                           \
                sizeof(tmem->u._union._field))
#define PRE_XEN_TMEMOP_READ(_tmem, _field)                              \
   __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)

   switch (tmem->cmd) {

   case VKI_XEN_TMEM_control:

      /* Common part for control hypercall:
       * vki_int32_t pool_id;
       * vki_uint32_t subop;
       */
      PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                   (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
      PRE_XEN_TMEMOP_READ(ctrl, subop);

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         PRE_XEN_TMEMOP_READ(ctrl, cli_id);
         PRE_XEN_TMEMOP_READ(ctrl, arg1);
         PRE_XEN_TMEMOP_READ(ctrl, buf);
         break;

      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);
      }

      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_tmem_op", ARG1);
   }

#undef PRE_XEN_TMEMOP_READ
#undef __PRE_XEN_TMEMOP_READ
}
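/* Descriptive note (not in the original source): the POST handlers below run
   after a hypercall has succeeded and mark the memory the hypervisor wrote
   back to the guest as defined for the tool. */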
POST(memory_op)
{
   switch (ARG1) {
   case VKI_XENMEM_maximum_ram_page:
   case VKI_XENMEM_set_memory_map:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_claim_pages:
   case VKI_XENMEM_maximum_gpfn:
   case VKI_XENMEM_remove_from_physmap:
   case VKI_XENMEM_access_op:
      /* No outputs */
      break;

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_populate_physmap: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;

      POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
                     sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
      break;
   }

   case VKI_XENMEM_machphys_mfn_list:
   case VKI_XENMEM_machphys_compat_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
      POST_MEM_WRITE((Addr)arg->extent_start.p,
                     sizeof(vki_xen_pfn_t) * arg->nr_extents);
      break;
   }

   case VKI_XENMEM_memory_map:
   case VKI_XENMEM_machine_memory_map: {
      struct vki_xen_memory_map *arg =
         (struct vki_xen_memory_map *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_entries, sizeof(arg->nr_entries));
      POST_MEM_WRITE((Addr)arg->buffer.p,
                     arg->nr_entries * 20 /* size of an e820 entry */);
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      if (arg->space == VKI_XENMAPSPACE_gmfn_range)
         POST_MEM_WRITE(ARG2, sizeof(*arg));
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      break;
   }
}

POST(mmuext_op)
{
   unsigned int *pdone = (unsigned int *)ARG3;
   POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
}
POST(xsm_op)
{
   /* XXX assuming flask, only actual XSM right now */
   struct vki_xen_flask_op *op = (struct vki_xen_flask_op *)ARG1;

   switch (op->interface_version) {
   case 0x00000001:
      break;
   default:
      return;
   }

#define POST_XEN_XSM_OP_WRITE(_xsm_op, _union, _field)          \
   POST_MEM_WRITE((Addr)&op->u._union._field,                   \
                  sizeof(op->u._union._field))

   switch (op->cmd) {
   case VKI_FLASK_SID_TO_CONTEXT:
      POST_XEN_XSM_OP_WRITE(SID_TO_CONTEXT, sid_context, size);
      POST_MEM_WRITE((Addr)op->u.sid_context.context.p,
                     op->u.sid_context.size);
      break;
   }
}
static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
{
   switch (cmd) {
   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
      break;
   }
   }
}

POST(sched_op)
{
   switch (ARG1) {
   case VKI_XEN_SCHEDOP_remote_shutdown:
      break;
   }
}

POST(evtchn_op)
{
   post_evtchn_op(tid, ARG1, (void *)ARG2, 0);
}

POST(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
}
POST(physdev_op)
{
   int cmd = ARG1;

#define POST_XEN_PHYSDEVOP_WRITE(_op, _field)                   \
   POST_MEM_WRITE((Addr)&arg->_field, sizeof(arg->_field))

   switch (cmd) {
   case VKI_XEN_PHYSDEVOP_unmap_pirq:
      /* No outputs */
      break;

   case VKI_XEN_PHYSDEVOP_map_pirq: {
      struct vki_xen_physdev_map_pirq *arg =
         (struct vki_xen_physdev_map_pirq *)ARG2;
      if (arg->type == VKI_XEN_MAP_PIRQ_TYPE_MULTI_MSI)
         POST_XEN_PHYSDEVOP_WRITE("map_pirq", entry_nr);
      POST_XEN_PHYSDEVOP_WRITE("map_pirq", pirq);
      break;
   }
   }
#undef POST_XEN_PHYSDEVOP_WRITE
}
POST(xen_version)
{
   switch (ARG1) {
   case VKI_XENVER_version:
      break;
   case VKI_XENVER_extraversion:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
      break;
   case VKI_XENVER_compile_info:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
      break;
   case VKI_XENVER_capabilities:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
      break;
   case VKI_XENVER_changeset:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
      break;
   case VKI_XENVER_platform_parameters:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
      break;
   case VKI_XENVER_get_features:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
      break;
   case VKI_XENVER_pagesize:
      break;
   case VKI_XENVER_guest_handle:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
      break;
   case VKI_XENVER_commandline:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
      break;
   }
}
POST(grant_table_op)
{
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)&gst->status, sizeof(gst->status));
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)gst->frame_list.p,
                    sizeof(*gst->frame_list.p) * gst->nr_frames);
      break;
   }
   }
}
POST(sysctl)
{
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      return;
   }

#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&sysctl->u._union._field,               \
                  sizeof(sysctl->u._union._field))
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
   __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
                     sysctl->u.readconsole.count * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
                        * sysctl->u.getdomaininfolist_00000008.num_domains);
         break;
      case 0x00000009:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
                        * sysctl->u.getdomaininfolist_00000009.num_domains);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_sched_id:
      POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
      break;
   case VKI_XEN_SYSCTL_cpupool_op:
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
         POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
         POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
      }
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
      break;
   case VKI_XEN_SYSCTL_physinfo:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
      case 0x00000009: /* Unchanged from version 8 */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
         break;
      }
      break;
   case VKI_XEN_SYSCTL_topologyinfo:
      POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_core.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_socket.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_node.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
      break;

   case VKI_XEN_SYSCTL_debug_keys:
      break;
   }
#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
}
POST(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   switch (domctl->interface_version) {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      return;
   }

#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,               \
                  sizeof(domctl->u._union._field));
#define POST_XEN_DOMCTL_WRITE(_domctl, _field)          \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)
   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_setvcpuextstate:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_test_assign_device:
   case VKI_XEN_DOMCTL_assign_device:
   case VKI_XEN_DOMCTL_deassign_device:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_irq_permission:
   case VKI_XEN_DOMCTL_iomem_permission:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_pin_mem_cacheattr:
   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_vcpu_msrs:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */
      break;

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
      break;

   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
      break;
   case VKI_XEN_DOMCTL_gettscinfo:
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
      case 0x00000008:
      case 0x00000009:
      case 0x0000000a:
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_00000007, out_info);
         POST_MEM_WRITE((Addr)domctl->u.tsc_info_00000007.out_info.p,
                        sizeof(vki_xen_guest_tsc_info_t));
         break;
      case 0x0000000b:
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, tsc_mode);
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, gtsc_khz);
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, incarnation);
         __POST_XEN_DOMCTL_WRITE(gettscinfo, tsc_info_0000000b, elapsed_nsec);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
      break;
   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally writes size... */
      __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
      /* ...but only writes to the buffer if it was non NULL */
      if ( domctl->u.hvmcontext.buffer.p )
         POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                        sizeof(*domctl->u.hvmcontext.buffer.p)
                        * domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
                           VKI_HVM_SAVE_LENGTH(CPU));
         break;
      }
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch (domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         case VKI_XEN_SCHEDULER_RTDS:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.budget);
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpuaffinity: /* Writes back actual result */
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                        domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
         break;
      default:
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
      }
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getdomaininfo:
      switch (domctl->interface_version) {
      case 0x00000007:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
         break;
      case 0x00000008:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
         break;
      case 0x00000009:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                     domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)
      {
      case 0x00000007:
      case 0x00000008:
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_disables_events);

         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 mcg_cap);
#endif
         break;

      case 0x00000009:
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_disables_events);

         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 caps);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 mci_ctl2_bank0);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 mci_ctl2_bank1);
#endif
         break;
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      if (domctl->u.vcpuextstate.buffer.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                        domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
      switch (domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
      case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         POST_XEN_DOMCTL_WRITE(shadow_op, pages);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
         if (domctl->u.shadow_op.dirty_bitmap.p)
            POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                           domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
         POST_XEN_DOMCTL_WRITE(shadow_op, mb);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_get_vcpu_msrs:
      if (domctl->u.vcpu_msrs.msrs.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpu_msrs.msrs.p,
                        sizeof(vki_xen_domctl_vcpu_msr_t) *
                        domctl->u.vcpu_msrs.msr_count);
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
   //case VKI_XEN_DOMCTL_vm_event_op: /* name change in 4.6 */
      switch (domctl->interface_version) {
      case 0x00000007: /* pre-4.6 */
      case 0x00000008:
      case 0x00000009:
      case 0x0000000a:
         __POST_XEN_DOMCTL_WRITE(mem_event_op, mem_event_op_00000007, port);
         break;
      case 0x0000000b:
         __POST_XEN_DOMCTL_WRITE(vm_event_op, vm_event_op_0000000b, port);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_monitor_op:
      switch (domctl->interface_version) {
      case 0x0000000b:
         if (domctl->u.monitor_op_0000000b.op
             == VKI_XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES) {
            switch (domctl->u.monitor_op_0000000b.event) {
            case VKI_XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000b,
                                       u.mov_to_cr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000b,
                                       u.mov_to_msr);
               break;
            case VKI_XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST:
               __POST_XEN_DOMCTL_WRITE(monitor_op, monitor_op_0000000b,
                                       u.guest_request);
               break;
            }
         }
         break;
      }
      break;
   }
#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
}

POST(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field)  \
   POST_MEM_WRITE((Addr)&((_type*)arg)->_field,         \
                  sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
   __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
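
/* POST_XEN_HVMOP_WRITE pastes the subop name into the matching
 * vki_xen_hvm_<subop>_t type and marks one field of the guest argument
 * block as written.  As an illustration, the use below of
 * POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access) expands to:
 *
 *    POST_MEM_WRITE((Addr)&((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access,
 *                   sizeof(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access))
 */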

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
   case VKI_XEN_HVMOP_set_pci_intx_level:
   case VKI_XEN_HVMOP_set_isa_irq_level:
   case VKI_XEN_HVMOP_set_pci_link_route:
   case VKI_XEN_HVMOP_set_mem_type:
   case VKI_XEN_HVMOP_set_mem_access:
   case VKI_XEN_HVMOP_inject_trap:
      /* No output parameters */
      break;

   case VKI_XEN_HVMOP_get_param:
      __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
      break;
   }
#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
}

POST(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   switch (tmem->op) {

   case VKI_XEN_TMEM_control:

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         break;
      }
      break;
   }
}

typedef
   struct {
      SyscallTableEntry entry;
      UWord nr_args;
   }
   XenHypercallTableEntry;

#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
#define HYPXY(const, name, nr_args)                     \
   [const] = { { vgSysWrap_xen_##name##_before,         \
                 vgSysWrap_xen_##name##_after },        \
               nr_args }
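
/* Each table slot pairs the usual before/after wrappers with the number of
 * hypercall arguments: HYPX_ registers only a PRE handler, HYPXY registers
 * both PRE and POST.  As an illustration, HYPXY(__VKI_XEN_domctl, domctl, 1)
 * expands to the designated initialiser
 *
 *    [__VKI_XEN_domctl] = { { vgSysWrap_xen_domctl_before,
 *                             vgSysWrap_xen_domctl_after }, 1 }
 */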

static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10

   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret, iret                                      // 23
   //    __VKI_XEN_vcpu_op, vcpu_op                                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   HYPXY(__VKI_XEN_xsm_op,                  xsm_op,            1), // 27
   //    __VKI_XEN_nmi_op                                          // 28
   HYPXY(__VKI_XEN_sched_op,                sched_op,          2), // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   HYPXY(__VKI_XEN_physdev_op,              physdev_op,        2), // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};
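
/* The table is indexed directly by hypercall number.  Slots left out of the
 * initialiser (the commented-out hypercalls above) have a NULL 'before'
 * pointer, so ML_(get_xen_hypercall_entry) below falls back to bad_hyper and
 * the hypercall is reported as unhandled. */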

static void bad_before ( ThreadId              tid,
                         SyscallArgLayout*     layout,
                         /*MOD*/SyscallArgs*   args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord*         flags )
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
             VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

static XenHypercallTableEntry bad_hyper =
   { { bad_before, NULL }, 0 };

static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
{
   XenHypercallTableEntry *ret = &bad_hyper;

   const UInt hypercall_table_size
      = sizeof(hypercall_table) / sizeof(hypercall_table[0]);

   /* Is it in the contiguous initial section of the table? */
   if (sysno < hypercall_table_size) {
      XenHypercallTableEntry* ent = &hypercall_table[sysno];
      if (ent->entry.before != NULL)
         ret = ent;
   }

   /* Can't find a wrapper */
   return ret;
}

DEFN_PRE_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   vg_assert(ent->entry.before);
   (ent->entry.before)( tid, layout, arrghs, status, flags );
}

DEFN_POST_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   if (ent->entry.after)
      (ent->entry.after)( tid, arrghs, status );
}

#endif // defined(ENABLE_XEN)