/*--------------------------------------------------------------------*/
/*--- Xen Hypercalls                                 syswrap-xen.c ---*/
/*--------------------------------------------------------------------*/

   This file is part of Valgrind, a dynamic binary instrumentation

   Copyright (C) 2012 Citrix Systems
      ian.campbell@citrix.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA

   The GNU General Public License is contained in the file COPYING.
#include "pub_core_basics.h"
#include "pub_core_vki.h"

#if defined(ENABLE_XEN)

#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-xen.h"

#define PRE(name)       static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name)      static DEFN_POST_TEMPLATE(xen, name)
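/* Sketch of the naming scheme only: PRE(x)/POST(x) expand, via the
   DEFN_*_TEMPLATE macros from priv_types_n_macros.h, into the
   definitions of the per-hypercall wrappers vgSysWrap_xen_x_before()
   and vgSysWrap_xen_x_after(), whose addresses the HYPX_/HYPXY table
   macros near the end of this file then take.  See
   priv_types_n_macros.h for the exact signatures. */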
static void bad_intf_version ( ThreadId tid,
                               SyscallArgLayout* layout,
                               /*MOD*/SyscallArgs* args,
                               /*OUT*/SyscallStatus* status,
                               /*OUT*/UWord* flags,
                               const HChar* hypercall,
                               UWord version)
{
   VG_(dmsg)("WARNING: %s version %#lx not supported\n",
             hypercall, version);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
static void bad_subop ( ThreadId tid,
                        SyscallArgLayout* layout,
                        /*MOD*/SyscallArgs* args,
                        /*OUT*/SyscallStatus* status,
                        /*OUT*/UWord* flags,
                        const HChar* hypercall,
                        UWord subop)
{
   VG_(dmsg)("WARNING: unhandled %s subop: %ld\n",
             hypercall, subop);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
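/* Both helpers report an unhandled request and fail it with ENOSYS:
   bad_intf_version() fires when a known hypercall arrives with an
   interface version this file has no handler for, bad_subop() when the
   sub-operation code within a known hypercall is unrecognised. */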
   PRINT("__HYPERVISOR_memory_op ( %ld, %lx )", ARG1, ARG2);

   case VKI_XENMEM_maximum_ram_page:

   case VKI_XENMEM_maximum_gpfn:
      PRE_MEM_READ("XENMEM_maximum_gpfn domid",
                   (Addr)ARG2, sizeof(vki_xen_domid_t));

   case VKI_XENMEM_machphys_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
                   (Addr)&arg->max_extents, sizeof(arg->max_extents));
      PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
                   (Addr)&arg->extent_start, sizeof(arg->extent_start));

   case VKI_XENMEM_set_memory_map: {
      struct vki_xen_foreign_memory_map *arg =
         (struct vki_xen_foreign_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_set_memory_map domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_set_memory_map map",
                   (Addr)&arg->map, sizeof(arg->map));

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_populate_physmap:
   case VKI_XENMEM_claim_pages: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;

      case VKI_XENMEM_increase_reservation:
         which = "XENMEM_increase_reservation";

      case VKI_XENMEM_decrease_reservation:
         which = "XENMEM_decrease_reservation";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);

      case VKI_XENMEM_populate_physmap:
         which = "XENMEM_populate_physmap";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);

      case VKI_XENMEM_claim_pages:
         which = "XENMEM_claim_pages";

         which = "XENMEM_unknown";

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_start,
                   sizeof(memory_reservation->extent_start));

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->nr_extents,
                   sizeof(memory_reservation->nr_extents));

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_order,
                   sizeof(memory_reservation->extent_order));

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->mem_flags,
                   sizeof(memory_reservation->mem_flags));

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->domid,
                   sizeof(memory_reservation->domid));

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_add_to_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_add_to_physmap size",
                   (Addr)&arg->size, sizeof(arg->size));
      PRE_MEM_READ("XENMEM_add_to_physmap space",
                   (Addr)&arg->space, sizeof(arg->space));
      PRE_MEM_READ("XENMEM_add_to_physmap idx",
                   (Addr)&arg->idx, sizeof(arg->idx));
      PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));

   case VKI_XENMEM_remove_from_physmap: {
      struct vki_xen_remove_from_physmap *arg =
         (struct vki_xen_remove_from_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_remove_from_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:

   case VKI_XENMEM_access_op: {
      struct vki_xen_mem_event_op *arg =
         (struct vki_xen_mem_event_op *)ARG2;
      PRE_MEM_READ("XENMEM_access_op domid",
                   (Addr)&arg->domain, sizeof(arg->domain));
      PRE_MEM_READ("XENMEM_access_op op",
                   (Addr)&arg->op, sizeof(arg->op));
      PRE_MEM_READ("XENMEM_access_op gfn",
                   (Addr)&arg->gfn, sizeof(arg->gfn));

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_memory_op", ARG1);
   struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
   unsigned int i, nr = ARG2;

   for (i=0; i<nr; i++) {
      struct vki_xen_mmuext_op *op = ops + i;
      PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
                   (Addr)&op->cmd, sizeof(op->cmd));

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_COPY_PAGE:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.mfn,
                      sizeof(op->arg1.mfn));

      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.linear_addr,
                      sizeof(op->arg1.linear_addr));

      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:

      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
                      (Addr)&op->arg2.nr_ents,
                      sizeof(op->arg2.nr_ents));

      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
                      (Addr)&op->arg2.vcpumask,
                      sizeof(op->arg2.vcpumask));

      case VKI_XEN_MMUEXT_COPY_PAGE:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
                      (Addr)&op->arg2.src_mfn,
                      sizeof(op->arg2.src_mfn));

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
static void pre_evtchn_op(ThreadId tid,
                          SyscallArgLayout* layout,
                          /*MOD*/SyscallArgs* arrghs,
                          /*OUT*/SyscallStatus* status,
                          /*OUT*/UWord* flags,
                          __vki_u32 cmd, void *arg, int compat)
{
   PRINT("__HYPERVISOR_event_channel_op%s ( %d, %p )",
         compat ? "_compat" : "", cmd, arg);

   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
                   (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
      PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
                   (Addr)&alloc_unbound->remote_dom,
                   sizeof(alloc_unbound->remote_dom));

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op_compat", cmd);

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op", cmd);

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 ARG1, (void *)ARG2, 0);

PRE(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
                ARG1, sizeof(*evtchn));

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 evtchn->cmd, &evtchn->u, 1);
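/* The two event-channel entry points differ only in how the arguments
   arrive: __HYPERVISOR_event_channel_op passes the command in ARG1 and
   a pointer to the per-command argument block in ARG2, while the
   _compat variant passes a single struct vki_xen_evtchn_op whose 'cmd'
   and 'u' members carry the same information, so both funnel into
   pre_evtchn_op()/post_evtchn_op() with a 'compat' flag. */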
   PRINT("__HYPERVISOR_xen_version ( %ld, %lx )", ARG1, ARG2);

   case VKI_XENVER_version:
   case VKI_XENVER_extraversion:
   case VKI_XENVER_compile_info:
   case VKI_XENVER_capabilities:
   case VKI_XENVER_changeset:
   case VKI_XENVER_platform_parameters:
   case VKI_XENVER_get_features:
   case VKI_XENVER_pagesize:
   case VKI_XENVER_guest_handle:
   case VKI_XENVER_commandline:

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xen_version", ARG1);

   PRINT("__HYPERVISOR_grant_table_op ( %ld, 0x%lx, %ld )", ARG1, ARG2, ARG3);

   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
                   (Addr)&gst->dom, sizeof(gst->dom));
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
                   (Addr)&gst->nr_frames, sizeof(gst->nr_frames));

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_grant_table_op", ARG1);

   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);

   /*
    * Common part of xen_sysctl:
    *    uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   switch (sysctl->interface_version)

      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_sysctl", sysctl->interface_version);

#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field)                  \
   PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field,       \
                (Addr)&sysctl->u._union._field,                         \
                sizeof(sysctl->u._union._field))
#define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
   __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
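/* For example (illustrative expansion only):
      PRE_XEN_SYSCTL_READ(readconsole, clear);
   becomes
      PRE_MEM_READ("XEN_SYSCTL_readconsole u.readconsole.clear",
                   (Addr)&sysctl->u.readconsole.clear,
                   sizeof(sysctl->u.readconsole.clear));
   i.e. each use marks one field of the sysctl argument block as
   "must be initialised by the caller". */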
   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      /* These are all unconditionally read */
      PRE_XEN_SYSCTL_READ(readconsole, clear);
      PRE_XEN_SYSCTL_READ(readconsole, incremental);
      PRE_XEN_SYSCTL_READ(readconsole, buffer);
      PRE_XEN_SYSCTL_READ(readconsole, count);

      /* 'index' only read if 'incremental' is nonzero */
      if (sysctl->u.readconsole.incremental)
         PRE_XEN_SYSCTL_READ(readconsole, index);

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);

         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);

         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);

         VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
                   "%"PRIx32" not implemented yet\n",
                   sysctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);

   case VKI_XEN_SYSCTL_debug_keys:
      PRE_XEN_SYSCTL_READ(debug_keys, keys);
      PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
      PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
                   (Addr)sysctl->u.debug_keys.keys.p,
                   sysctl->u.debug_keys.nr_keys * sizeof(char));

   case VKI_XEN_SYSCTL_sched_id:

   case VKI_XEN_SYSCTL_cpupool_op:
      PRE_XEN_SYSCTL_READ(cpupool_op, op);

      switch(sysctl->u.cpupool_op.op) {
      case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
         PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
         PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
         PRE_XEN_SYSCTL_READ(cpupool_op, domid);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
         PRE_XEN_SYSCTL_READ(cpupool_op, cpu);

   case VKI_XEN_SYSCTL_physinfo:
      /* No input params */

   case VKI_XEN_SYSCTL_topologyinfo:
      PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);

   case VKI_XEN_SYSCTL_numainfo:
      PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sysctl", sysctl->cmd);

#undef PRE_XEN_SYSCTL_READ
#undef __PRE_XEN_SYSCTL_READ

   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   PRINT("__HYPERVISOR_domctl ( %d ) on dom%d", domctl->cmd, domctl->domain);

   /*
    * Common part of xen_domctl:
    *    vki_uint32_t interface_version;
    *    vki_xen_domid_t domain;
    */
   PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
                + sizeof(vki_xen_domid_t));

   switch (domctl->interface_version)

      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_domctl", domctl->interface_version);

#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field)                  \
   PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field,       \
                (Addr)&domctl->u._union._field,                         \
                sizeof(domctl->u._union._field))
#define PRE_XEN_DOMCTL_READ(_domctl, _field) \
   __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
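/* As with the sysctl macros above, the two-level definition lets the
   union member name differ from the command name: PRE_XEN_DOMCTL_READ
   assumes they match, while __PRE_XEN_DOMCTL_READ spells the union out
   explicitly (e.g. XEN_DOMCTL_gethvmcontext stores its arguments in
   u.hvmcontext, not u.gethvmcontext). */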
   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_vcpus:
   case VKI_XEN_DOMCTL_get_address_size:
   case VKI_XEN_DOMCTL_gettscinfo:
   case VKI_XEN_DOMCTL_getdomaininfo:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_resumedomain:
      /* No input fields. */

   case VKI_XEN_DOMCTL_createdomain:
      PRE_XEN_DOMCTL_READ(createdomain, ssidref);
      PRE_XEN_DOMCTL_READ(createdomain, handle);
      PRE_XEN_DOMCTL_READ(createdomain, flags);

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally reads the 'buffer' pointer */
      __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
      /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
       * buffer is a request for the required size. */
      if ( domctl->u.hvmcontext.buffer.p )
         __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);

   case VKI_XEN_DOMCTL_sethvmcontext:
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
      PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
                   (Addr)domctl->u.hvmcontext.buffer.p,
                   domctl->u.hvmcontext.size);

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);

      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                          (Addr)domctl->u.hvmcontext_partial.buffer.p,
                          VKI_HVM_SAVE_LENGTH(CPU));

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl_gethvmcontext_partial type",
                   domctl->u.hvmcontext_partial.type);

   case VKI_XEN_DOMCTL_max_mem:
      PRE_XEN_DOMCTL_READ(max_mem, max_memkb);

   case VKI_XEN_DOMCTL_set_address_size:
      __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);

   case VKI_XEN_DOMCTL_settscinfo:
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);

   case VKI_XEN_DOMCTL_ioport_permission:
      PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
      PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
      PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);

   case VKI_XEN_DOMCTL_hypercall_init:
      PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);

   case VKI_XEN_DOMCTL_settimeoffset:
      PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);

   case VKI_XEN_DOMCTL_getvcpuinfo:
      PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);

   case VKI_XEN_DOMCTL_scheduler_op:
      PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
      PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
         case VKI_XEN_SCHEDULER_CREDIT:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
         case VKI_XEN_SCHEDULER_CREDIT2:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
         case VKI_XEN_SCHEDULER_RTDS:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.budget);
         case VKI_XEN_SCHEDULER_ARINC653:

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      switch (domctl->interface_version) {
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);

         __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);

   case VKI_XEN_DOMCTL_setvcpuaffinity:
      switch (domctl->interface_version) {
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
         PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
                      (Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                      domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);

         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
         __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_hard.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT) {
            __PRE_XEN_DOMCTL_READ(
               setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
            PRE_MEM_READ(
               "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_soft.bitmap",
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
   case VKI_XEN_DOMCTL_getnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
   case VKI_XEN_DOMCTL_setnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);

   case VKI_XEN_DOMCTL_getvcpucontext:
      __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);

   case VKI_XEN_DOMCTL_setvcpucontext:
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)
         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);

         __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);

         VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_get_ext_vcpucontext domctl version %#"
                   PRIx32" not implemented\n", domctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);

   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
      switch (domctl->interface_version)
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, size);
#if defined(__i386__) || defined(__x86_64__)
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               syscall32_disables_events);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                               sysenter_disables_events);

         if ( domctl->u.ext_vcpucontext_00000008.size >=
              offsetof(struct vki_xen_domctl_ext_vcpucontext_00000008, mcg_cap) )
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
                                  mcg_cap);

         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_eip);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_callback_cs);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               syscall32_disables_events);
         __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                               sysenter_disables_events);

         if ( domctl->u.ext_vcpucontext_00000009.size >=
              offsetof(struct vki_xen_domctl_ext_vcpucontext_00000009, caps) )
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                  caps);
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                  mci_ctl2_bank0);
            __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
                                  mci_ctl2_bank1);

         VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_set_ext_vcpucontext domctl version %#"
                   PRIx32" not implemented\n", domctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);

   case VKI_XEN_DOMCTL_set_cpuid:
      PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
                   (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
      PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
                   (Addr)domctl->u.getpageframeinfo3.array.p,
                   domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));

   case VKI_XEN_DOMCTL_setvcpuextstate:
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, buffer);
      PRE_MEM_READ("XEN_DOMCTL_setvcpuextstate *u.vcpuextstate.buffer.p",
                   (Addr)domctl->u.vcpuextstate.buffer.p,
                   domctl->u.vcpuextstate.size);

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);

   case VKI_XEN_DOMCTL_shadow_op:
      PRE_XEN_DOMCTL_READ(shadow_op, op);

      switch(domctl->u.shadow_op.op)
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
         /* No further inputs */

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
         PRE_XEN_DOMCTL_READ(shadow_op, mode);
         switch(domctl->u.shadow_op.mode)
         case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
            goto domctl_shadow_op_enable_logdirty;

            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl shadowop mode",
                      domctl->u.shadow_op.mode);

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
      domctl_shadow_op_enable_logdirty:
         /* No further inputs */

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
         PRE_XEN_DOMCTL_READ(shadow_op, pages);

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl shadow(10)",
                   domctl->u.shadow_op.op);

   case VKI_XEN_DOMCTL_set_max_evtchn:
      PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);

   case VKI_XEN_DOMCTL_cacheflush:
      PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
      PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);

   case VKI_XEN_DOMCTL_set_access_required:
      PRE_XEN_DOMCTL_READ(access_required, access_required);

   case VKI_XEN_DOMCTL_mem_event_op:
      PRE_XEN_DOMCTL_READ(mem_event_op, op);
      PRE_XEN_DOMCTL_READ(mem_event_op, mode);

   case VKI_XEN_DOMCTL_debug_op:
      PRE_XEN_DOMCTL_READ(debug_op, op);
      PRE_XEN_DOMCTL_READ(debug_op, vcpu);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_domctl", domctl->cmd);

#undef PRE_XEN_DOMCTL_READ
#undef __PRE_XEN_DOMCTL_READ

   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);

#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_HVMOP_READ(_hvm_op, _field) \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
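/* Here the two-level macro pastes the argument type instead of a union
   member: PRE_XEN_HVMOP_READ(set_isa_irq_level, domid) assumes the
   argument block is a vki_xen_hvm_set_isa_irq_level_t, while the __
   form takes the struct type explicitly (used below for the *_param
   operations, whose argument block is a struct vki_xen_hvm_param). */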
   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);

   case VKI_XEN_HVMOP_get_param:
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* if default access */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);

#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   PRINT("__HYPERVISOR_tmem_op ( %d )", tmem->cmd);

   /* Common part for xen_tmem_op:
    *    vki_uint32_t cmd;
    */
   PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));

#define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field)                    \
   PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field,        \
                (Addr)&tmem->u._union._field,                           \
                sizeof(tmem->u._union._field))
#define PRE_XEN_TMEMOP_READ(_tmem, _field) \
   __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)

   case VKI_XEN_TMEM_control:

      /* Common part for control hypercall:
       * vki_int32_t pool_id;
       * vki_uint32_t subop;
       */
      PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                   (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
      PRE_XEN_TMEMOP_READ(ctrl, subop);

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         PRE_XEN_TMEMOP_READ(ctrl, cli_id);
         PRE_XEN_TMEMOP_READ(ctrl, arg1);
         PRE_XEN_TMEMOP_READ(ctrl, buf);

         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);

      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_tmem_op", ARG1);

#undef PRE_XEN_TMEMOP_READ
#undef __PRE_XEN_TMEMOP_READ
   case VKI_XENMEM_maximum_ram_page:
   case VKI_XENMEM_set_memory_map:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_claim_pages:
   case VKI_XENMEM_maximum_gpfn:
   case VKI_XENMEM_remove_from_physmap:
   case VKI_XENMEM_access_op:

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_populate_physmap: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;

      POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
                     sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);

   case VKI_XENMEM_machphys_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
      POST_MEM_WRITE((Addr)arg->extent_start.p,
                     sizeof(vki_xen_pfn_t) * arg->nr_extents);

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      if (arg->space == VKI_XENMAPSPACE_gmfn_range)
         POST_MEM_WRITE(ARG2, sizeof(*arg));

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:

   unsigned int *pdone = (unsigned int *)ARG3;

   POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));

static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)

   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));

   post_evtchn_op(tid, ARG1, (void *)ARG2, 0);

POST(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);

   case VKI_XENVER_version:

   case VKI_XENVER_extraversion:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));

   case VKI_XENVER_compile_info:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));

   case VKI_XENVER_capabilities:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));

   case VKI_XENVER_changeset:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));

   case VKI_XENVER_platform_parameters:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));

   case VKI_XENVER_get_features:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));

   case VKI_XENVER_pagesize:

   case VKI_XENVER_guest_handle:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));

   case VKI_XENVER_commandline:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));

POST(grant_table_op)

   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)&gst->status, sizeof(gst->status));
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)gst->frame_list.p,
                    sizeof(*gst->frame_list.p) * gst->nr_frames);
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   switch (sysctl->interface_version)

#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&sysctl->u._union._field,               \
                  sizeof(sysctl->u._union._field))
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
   __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
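/* The POST side mirrors the PRE macros: each POST_*_WRITE marks a field
   the hypervisor is expected to have filled in, so that tools such as
   Memcheck treat the output buffers as initialised after the hypercall
   returns.  This is a description of intent only; the macros themselves
   just call POST_MEM_WRITE on the field's address and size. */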
   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
                     sysctl->u.readconsole.count * sizeof(char));

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
                        * sysctl->u.getdomaininfolist_00000008.num_domains);

         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
                        * sysctl->u.getdomaininfolist_00000009.num_domains);

         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);

   case VKI_XEN_SYSCTL_sched_id:
      POST_XEN_SYSCTL_WRITE(sched_id, sched_id);

   case VKI_XEN_SYSCTL_cpupool_op:
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
         POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
         POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);

   case VKI_XEN_SYSCTL_physinfo:
      switch (sysctl->interface_version)
      case 0x00000009: /* Unchanged from version 8 */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);

         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);

   case VKI_XEN_SYSCTL_topologyinfo:
      POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_core.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_socket.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_node.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);

   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);

   case VKI_XEN_SYSCTL_debug_keys:

#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   switch (domctl->interface_version) {

#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,               \
                  sizeof(domctl->u._union._field));
#define POST_XEN_DOMCTL_WRITE(_domctl, _field) \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_setvcpuextstate:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);

   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);

   case VKI_XEN_DOMCTL_gettscinfo:
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);

   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally writes size... */
      __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
      /* ...but only writes to the buffer if it was non NULL */
      if ( domctl->u.hvmcontext.buffer.p )
         POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                        sizeof(*domctl->u.hvmcontext.buffer.p)
                        * domctl->u.hvmcontext.size);

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
                           VKI_HVM_SAVE_LENGTH(CPU));

   case VKI_XEN_DOMCTL_scheduler_op:
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
         case VKI_XEN_SCHEDULER_ARINC653:
         case VKI_XEN_SCHEDULER_RTDS:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.budget);

   case VKI_XEN_DOMCTL_getvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpuaffinity: /* Writes back actual result */
      switch (domctl->interface_version) {
         POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                        domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);

         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);

   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);

   case VKI_XEN_DOMCTL_getdomaininfo:
      switch (domctl->interface_version) {
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);

         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);

         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);

   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                     domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008, size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_disables_events);

         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 mcg_cap);

         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_disables_events);

         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 caps);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 mci_ctl2_bank0);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 mci_ctl2_bank1);

   case VKI_XEN_DOMCTL_getvcpuextstate:
      if (domctl->u.vcpuextstate.buffer.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                        domctl->u.vcpuextstate.size);

   case VKI_XEN_DOMCTL_shadow_op:
      switch(domctl->u.shadow_op.op)
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         POST_XEN_DOMCTL_WRITE(shadow_op, pages);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
         if(domctl->u.shadow_op.dirty_bitmap.p)
            POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                           domctl->u.shadow_op.pages * sizeof(vki_uint8_t));

   case VKI_XEN_DOMCTL_mem_event_op:
      POST_XEN_DOMCTL_WRITE(mem_event_op, port);

#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field)  \
   POST_MEM_WRITE((Addr)&((_type*)arg)->_field,         \
                  sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
   __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)

   case VKI_XEN_HVMOP_set_param:
   case VKI_XEN_HVMOP_set_isa_irq_level:
   case VKI_XEN_HVMOP_set_pci_link_route:
   case VKI_XEN_HVMOP_set_mem_type:
   case VKI_XEN_HVMOP_set_mem_access:
   case VKI_XEN_HVMOP_inject_trap:
      /* No output parameters */

   case VKI_XEN_HVMOP_get_param:
      __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);

   case VKI_XEN_HVMOP_get_mem_access:
      POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);

#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   case VKI_XEN_TMEM_control:

      switch(tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
   SyscallTableEntry entry;
} XenHypercallTableEntry;

#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
#define HYPXY(const, name, nr_args)                     \
   [const] = { { vgSysWrap_xen_##name##_before,         \
                 vgSysWrap_xen_##name##_after },        \
               nr_args }

static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10

   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret, iret                                      // 23
   //    __VKI_XEN_vcpu_op, vcpu_op                                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   //    __VKI_XEN_xsm_op                                          // 27
   //    __VKI_XEN_nmi_op                                          // 28
   //    __VKI_XEN_sched_op                                        // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   //    __VKI_XEN_physdev_op                                      // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};

static void bad_before ( ThreadId tid,
                         SyscallArgLayout* layout,
                         /*MOD*/SyscallArgs* args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord* flags)
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
             VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

static XenHypercallTableEntry bad_hyper =
   { { bad_before, NULL }, 0 };
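/* hypercall_table[] uses designated initialisers indexed by hypercall
   number, so the commented-out entries above simply stay zero-filled;
   ML_(get_xen_hypercall_entry)() below treats a NULL 'before' pointer
   as "no wrapper" and falls back to bad_hyper / bad_before, which
   reports the hypercall and fails it with ENOSYS. */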
static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
{
   XenHypercallTableEntry *ret = &bad_hyper;

   const UInt hypercall_table_size
      = sizeof(hypercall_table) / sizeof(hypercall_table[0]);

   /* Is it in the contiguous initial section of the table? */
   if (sysno < hypercall_table_size) {
      XenHypercallTableEntry* ent = &hypercall_table[sysno];
      if (ent->entry.before != NULL)

   /* Can't find a wrapper */

DEFN_PRE_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   vg_assert(ent->entry.before);
   (ent->entry.before)( tid, layout, arrghs, status, flags );
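/* ARG8 is (re)used here as an out-of-band slot: the generic syscall
   machinery reads it back to learn how many guest arguments the
   hypercall actually consumes (presumably so the arch-specific code
   knows how many argument registers to preserve); the wrappers
   themselves only ever see tid/layout/arrghs/status/flags. */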
DEFN_POST_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   if (ent->entry.after)
      (ent->entry.after)( tid, arrghs, status );
}

#endif // defined(ENABLE_XEN)