/*--------------------------------------------------------------------*/
/*--- Xen Hypercalls                                 syswrap-xen.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2012 Citrix Systems
      ian.campbell@citrix.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"

#if defined(ENABLE_XEN)

#include "pub_core_vkiscnums.h"
#include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-xen.h"

#define PRE(name)  static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name) static DEFN_POST_TEMPLATE(xen, name)
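
/* Note: DEFN_PRE_TEMPLATE(xen, foo) (see priv_types_n_macros.h) declares a
   handler named vgSysWrap_xen_foo_before, and the POST variant declares
   vgSysWrap_xen_foo_after, so PRE(foo)/POST(foo) below pair up with the
   vgSysWrap_xen_##name##_before/_after references used by the HYPX_/HYPXY
   table macros near the end of this file. */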

static void bad_subop ( ThreadId              tid,
                        SyscallArgLayout*     layout,
                        /*MOD*/SyscallArgs*   args,
                        /*OUT*/SyscallStatus* status,
                        /*OUT*/UWord*         flags,
                        const HChar*          hypercall,
                        UWord                 subop)
{
   VG_(dmsg)("WARNING: unhandled %s subop: %ld\n",
             hypercall, subop);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

PRE(memory_op)
{
   PRINT("__HYPERVISOR_memory_op ( %ld, %lx )", ARG1, ARG2);

   switch (ARG1) {
   case VKI_XENMEM_maximum_ram_page:
      /* No inputs */
      break;

   case VKI_XENMEM_maximum_gpfn:
      PRE_MEM_READ("XENMEM_maximum_gpfn domid",
                   (Addr)ARG2, sizeof(vki_xen_domid_t));
      break;

   case VKI_XENMEM_machphys_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
                   (Addr)&arg->max_extents, sizeof(arg->max_extents));
      PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
                   (Addr)&arg->extent_start, sizeof(arg->extent_start));
      break;
   }

   case VKI_XENMEM_set_memory_map: {
      struct vki_xen_foreign_memory_map *arg =
         (struct vki_xen_foreign_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_set_memory_map domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_set_memory_map map",
                   (Addr)&arg->map, sizeof(arg->map));
      break;
   }

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_populate_physmap:
   case VKI_XENMEM_claim_pages: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;
      const HChar *which;

      switch (ARG1) {
      case VKI_XENMEM_increase_reservation:
         which = "XENMEM_increase_reservation";
         break;
      case VKI_XENMEM_decrease_reservation:
         which = "XENMEM_decrease_reservation";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_populate_physmap:
         which = "XENMEM_populate_physmap";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_claim_pages:
         which = "XENMEM_claim_pages";
         break;
      default:
         which = "XENMEM_unknown";
         break;
      }

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_start,
                   sizeof(memory_reservation->extent_start));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->nr_extents,
                   sizeof(memory_reservation->nr_extents));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_order,
                   sizeof(memory_reservation->extent_order));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->mem_flags,
                   sizeof(memory_reservation->mem_flags));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->domid,
                   sizeof(memory_reservation->domid));
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_add_to_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_add_to_physmap size",
                   (Addr)&arg->size, sizeof(arg->size));
      PRE_MEM_READ("XENMEM_add_to_physmap space",
                   (Addr)&arg->space, sizeof(arg->space));
      PRE_MEM_READ("XENMEM_add_to_physmap idx",
                   (Addr)&arg->idx, sizeof(arg->idx));
      PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_remove_from_physmap: {
      struct vki_xen_remove_from_physmap *arg =
         (struct vki_xen_remove_from_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_remove_from_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      /* No inputs */
      break;

   case VKI_XENMEM_access_op: {
      struct vki_xen_mem_event_op *arg =
         (struct vki_xen_mem_event_op *)ARG2;
      PRE_MEM_READ("XENMEM_access_op domid",
                   (Addr)&arg->domain, sizeof(arg->domain));
      PRE_MEM_READ("XENMEM_access_op op",
                   (Addr)&arg->op, sizeof(arg->op));
      PRE_MEM_READ("XENMEM_access_op gfn",
                   (Addr)&arg->gfn, sizeof(arg->gfn));
      break;
   }

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_memory_op", ARG1);
      break;
   }
}

PRE(mmuext_op)
{
   struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
   unsigned int i, nr = ARG2;

   for (i = 0; i < nr; i++) {
      struct vki_xen_mmuext_op *op = ops + i;
      PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
                   (Addr)&op->cmd, sizeof(op->cmd));
      switch (op->cmd) {
      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_COPY_PAGE:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.mfn,
                      sizeof(op->arg1.mfn));
         break;

      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.linear_addr,
                      sizeof(op->arg1.linear_addr));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
         /* None of these use arg1 */
         break;
      }

      switch (op->cmd) {
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
                      (Addr)&op->arg2.nr_ents,
                      sizeof(op->arg2.nr_ents));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
                      (Addr)&op->arg2.vcpumask,
                      sizeof(op->arg2.vcpumask));
         break;

      case VKI_XEN_MMUEXT_COPY_PAGE:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
                      (Addr)&op->arg2.src_mfn,
                      sizeof(op->arg2.src_mfn));
         break;

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         /* None of these use arg2 */
         break;
      }
   }
}

static void pre_evtchn_op(ThreadId tid,
                          SyscallArgLayout*     layout,
                          /*MOD*/SyscallArgs*   arrghs,
                          /*OUT*/SyscallStatus* status,
                          /*OUT*/UWord*         flags,
                          __vki_u32 cmd, void *arg, int compat)
{
   PRINT("__HYPERVISOR_event_channel_op%s ( %d, %p )",
         compat ? "_compat" : "", cmd, arg);

   switch (cmd) {
   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
                   (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
      PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
                   (Addr)&alloc_unbound->remote_dom,
                   sizeof(alloc_unbound->remote_dom));
      break;
   }
   default:
      if (compat)
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op_compat", cmd);
      else
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op", cmd);
      break;
   }
}

PRE(evtchn_op)
{
   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 ARG1, (void *)ARG2, 0);
}

PRE(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
                ARG1, sizeof(*evtchn));

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 evtchn->cmd, &evtchn->u, 1);
}

PRE(xen_version)
{
   PRINT("__HYPERVISOR_xen_version ( %ld, %lx )", ARG1, ARG2);

   switch (ARG1) {
   case VKI_XENVER_version:
   case VKI_XENVER_extraversion:
   case VKI_XENVER_compile_info:
   case VKI_XENVER_capabilities:
   case VKI_XENVER_changeset:
   case VKI_XENVER_platform_parameters:
   case VKI_XENVER_get_features:
   case VKI_XENVER_pagesize:
   case VKI_XENVER_guest_handle:
   case VKI_XENVER_commandline:
      /* No inputs */
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xen_version", ARG1);
      break;
   }
}

PRE(grant_table_op)
{
   PRINT("__HYPERVISOR_grant_table_op ( %ld, 0x%lx, %ld )", ARG1, ARG2, ARG3);
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
                   (Addr)&gst->dom, sizeof(gst->dom));
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
                   (Addr)&gst->nr_frames, sizeof(gst->nr_frames));
      break;
   }
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_grant_table_op", ARG1);
      break;
   }
}

PRE(sysctl)
{
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);

   /*
    * Common part of xen_sysctl:
    *    uint32_t cmd;
    *    uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported\n",
                sysctl->interface_version);
      if (VG_(clo_verbosity) > 1) {
         VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
      }
      VG_(dmsg)("You may be able to write your own handler.\n");
      VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
      VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
      VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
      VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field)                  \
   PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field,       \
                (Addr)&sysctl->u._union._field,                         \
                sizeof(sysctl->u._union._field))
#define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
   __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
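
/* A worked expansion, for reference (using the getdomaininfolist_00000008
   union member, which appears just below):
   PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains) becomes
   roughly

      PRE_MEM_READ("XEN_SYSCTL_getdomaininfolist_00000008 "
                   "u.getdomaininfolist_00000008.max_domains",
                   (Addr)&sysctl->u.getdomaininfolist_00000008.max_domains,
                   sizeof(sysctl->u.getdomaininfolist_00000008.max_domains));

   i.e. each macro call tells the tool that the hypervisor will read exactly
   one field of the sysctl payload, so it can be checked individually. */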

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      /* These are all unconditionally read */
      PRE_XEN_SYSCTL_READ(readconsole, clear);
      PRE_XEN_SYSCTL_READ(readconsole, incremental);
      PRE_XEN_SYSCTL_READ(readconsole, buffer);
      PRE_XEN_SYSCTL_READ(readconsole, count);

      /* 'index' only read if 'incremental' is nonzero */
      if (sysctl->u.readconsole.incremental)
         PRE_XEN_SYSCTL_READ(readconsole, index);
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
         break;
      case 0x00000009:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
         break;
      case 0x0000000a:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
         break;
      default:
         VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
                   "%"PRIx32" not implemented yet\n",
                   sysctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_debug_keys:
      PRE_XEN_SYSCTL_READ(debug_keys, keys);
      PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
      PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
                   (Addr)sysctl->u.debug_keys.keys.p,
                   sysctl->u.debug_keys.nr_keys * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_sched_id:
      /* No inputs */
      break;

   case VKI_XEN_SYSCTL_cpupool_op:
      PRE_XEN_SYSCTL_READ(cpupool_op, op);

      switch (sysctl->u.cpupool_op.op) {
      case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
         PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
      }

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
         PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
         PRE_XEN_SYSCTL_READ(cpupool_op, domid);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
         PRE_XEN_SYSCTL_READ(cpupool_op, cpu);

      break;

   case VKI_XEN_SYSCTL_physinfo:
      /* No input params */
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sysctl", sysctl->cmd);
      break;
   }
#undef PRE_XEN_SYSCTL_READ
#undef __PRE_XEN_SYSCTL_READ
}

PRE(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   PRINT("__HYPERVISOR_domctl ( %d ) on dom%d", domctl->cmd, domctl->domain);

   /*
    * Common part of xen_domctl:
    *    vki_uint32_t cmd;
    *    vki_uint32_t interface_version;
    *    vki_xen_domid_t domain;
    */
   PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
                + sizeof(vki_xen_domid_t));

   switch (domctl->interface_version)
   {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
      break;
   default:
      VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported\n",
                domctl->interface_version);
      if (VG_(clo_verbosity) > 1) {
         VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
      }
      VG_(dmsg)("You may be able to write your own handler.\n");
      VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
      VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
      VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
      VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

      SET_STATUS_Failure(VKI_EINVAL);
      return;
   }

#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field)                  \
   PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field,       \
                (Addr)&domctl->u._union._field,                         \
                sizeof(domctl->u._union._field))
#define PRE_XEN_DOMCTL_READ(_domctl, _field) \
   __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
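
/* As with the sysctl variant above, the two-argument form covers the common
   case where the union member is named after the DOMCTL subop; the explicit
   three-argument __PRE_XEN_DOMCTL_READ form is used when the names differ
   (e.g. XEN_DOMCTL_gethvmcontext keeps its payload in u.hvmcontext). */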

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_vcpus:
   case VKI_XEN_DOMCTL_get_address_size:
   case VKI_XEN_DOMCTL_gettscinfo:
   case VKI_XEN_DOMCTL_getdomaininfo:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_resumedomain:
      /* No input fields. */
      break;

   case VKI_XEN_DOMCTL_createdomain:
      PRE_XEN_DOMCTL_READ(createdomain, ssidref);
      PRE_XEN_DOMCTL_READ(createdomain, handle);
      PRE_XEN_DOMCTL_READ(createdomain, flags);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally reads the 'buffer' pointer */
      __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
      /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
       * buffer is a request for the required size. */
      if ( domctl->u.hvmcontext.buffer.p )
         __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
      break;

   case VKI_XEN_DOMCTL_sethvmcontext:
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
      PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
                   (Addr)domctl->u.hvmcontext.buffer.p,
                   domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);

      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                          (Addr)domctl->u.hvmcontext_partial.buffer.p,
                          VKI_HVM_SAVE_LENGTH(CPU));
         break;
      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl_gethvmcontext_partial type",
                   domctl->u.hvmcontext_partial.type);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_max_mem:
      PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
      break;

   case VKI_XEN_DOMCTL_set_address_size:
      __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_settscinfo:
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_ioport_permission:
      PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
      PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
      PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_hypercall_init:
      PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
      break;

   case VKI_XEN_DOMCTL_settimeoffset:
      PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
      PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity, vcpu);
      break;

   case VKI_XEN_DOMCTL_setvcpuaffinity:
      __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
      PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
                   (Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
                   domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      break;
   case VKI_XEN_DOMCTL_setnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getvcpucontext:
      __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
      break;

   case VKI_XEN_DOMCTL_setvcpucontext:
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_set_cpuid:
      PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
                   (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
      PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
                   (Addr)domctl->u.getpageframeinfo3.array.p,
                   domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
      PRE_XEN_DOMCTL_READ(shadow_op, op);

      switch(domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
         /* No further inputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
         PRE_XEN_DOMCTL_READ(shadow_op, mode);
         switch(domctl->u.shadow_op.mode)
         {
         case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
            goto domctl_shadow_op_enable_logdirty;

         default:
            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl shadowop mode",
                      domctl->u.shadow_op.mode);
            break;
         }
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
 domctl_shadow_op_enable_logdirty:
         /* No further inputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
         PRE_XEN_DOMCTL_READ(shadow_op, pages);
         break;

      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl shadow(10)",
                   domctl->u.shadow_op.op);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_set_max_evtchn:
      PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);
      break;

   case VKI_XEN_DOMCTL_cacheflush:
      PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
      PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
      break;

   case VKI_XEN_DOMCTL_set_access_required:
      PRE_XEN_DOMCTL_READ(access_required, access_required);
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
      PRE_XEN_DOMCTL_READ(mem_event_op, op);
      PRE_XEN_DOMCTL_READ(mem_event_op, mode);
      break;

   case VKI_XEN_DOMCTL_debug_op:
      PRE_XEN_DOMCTL_READ(debug_op, op);
      PRE_XEN_DOMCTL_READ(debug_op, vcpu);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_domctl", domctl->cmd);
      break;
   }
#undef PRE_XEN_DOMCTL_READ
#undef __PRE_XEN_DOMCTL_READ
}

PRE(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);

#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_HVMOP_READ(_hvm_op, _field) \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
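
/* Here the short form pastes the subop name into a vki type name, e.g.
   PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq) expands roughly to

      PRE_MEM_READ("XEN_HVMOP_set_isa_irq_level isa_irq",
                   (Addr)&((vki_xen_hvm_set_isa_irq_level_t*)arg)->isa_irq,
                   sizeof(((vki_xen_hvm_set_isa_irq_level_t*)arg)->isa_irq));

   so it only works for subops whose argument type follows the
   vki_xen_hvm_<subop>_t naming pattern; the others use the explicit
   three-argument form with a struct type. */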

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_param:
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
      break;

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);
      break;

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);
      break;

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
      break;

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* if default access */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));
      break;

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);
      break;
   }
#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
}

PRE(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   PRINT("__HYPERVISOR_tmem_op ( %d )", tmem->cmd);

   /* Common part for xen_tmem_op:
    *    vki_uint32_t cmd;
    */
   PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));

#define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field)                    \
   PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field,        \
                (Addr)&tmem->u._union._field,                           \
                sizeof(tmem->u._union._field))
#define PRE_XEN_TMEMOP_READ(_tmem, _field) \
   __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)
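
/* Same pattern again: PRE_XEN_TMEMOP_READ(ctrl, subop) expands to a
   PRE_MEM_READ of tmem->u.ctrl.subop with an auto-generated
   "XEN_tmem_op_ctrl u.ctrl.subop" label, so each field of the control
   sub-command is validated individually rather than reading the whole
   union blindly. */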

   switch (tmem->cmd) {

   case VKI_XEN_TMEM_control:

      /* Common part for control hypercall:
       *    vki_int32_t pool_id;
       *    vki_uint32_t subop;
       */
      PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                   (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
      PRE_XEN_TMEMOP_READ(ctrl, subop);

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         PRE_XEN_TMEMOP_READ(ctrl, cli_id);
         PRE_XEN_TMEMOP_READ(ctrl, arg1);
         PRE_XEN_TMEMOP_READ(ctrl, buf);
         break;

      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);
         break;
      }
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_tmem_op", ARG1);
      break;
   }

#undef PRE_XEN_TMEMOP_READ
#undef __PRE_XEN_TMEMOP_READ
}

POST(memory_op)
{
   switch (ARG1) {
   case VKI_XENMEM_maximum_ram_page:
   case VKI_XENMEM_set_memory_map:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_claim_pages:
   case VKI_XENMEM_maximum_gpfn:
   case VKI_XENMEM_remove_from_physmap:
   case VKI_XENMEM_access_op:
      /* No outputs */
      break;

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_populate_physmap: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;

      POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
                     sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
      break;
   }

   case VKI_XENMEM_machphys_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
      POST_MEM_WRITE((Addr)arg->extent_start.p,
                     sizeof(vki_xen_pfn_t) * arg->nr_extents);
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      if (arg->space == VKI_XENMAPSPACE_gmfn_range)
         POST_MEM_WRITE(ARG2, sizeof(*arg));
      break;
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      /* No outputs */
      break;
   }
}

POST(mmuext_op)
{
   unsigned int *pdone = (unsigned int *)ARG3;
   POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
}

static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
{
   switch (cmd) {
   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
      break;
   }
   }
}

POST(evtchn_op)
{
   post_evtchn_op(tid, ARG1, (void *)ARG2, 0);
}

POST(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
}

POST(xen_version)
{
   switch (ARG1) {
   case VKI_XENVER_version:
      /* No outputs */
      break;
   case VKI_XENVER_extraversion:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
      break;
   case VKI_XENVER_compile_info:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
      break;
   case VKI_XENVER_capabilities:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
      break;
   case VKI_XENVER_changeset:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
      break;
   case VKI_XENVER_platform_parameters:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
      break;
   case VKI_XENVER_get_features:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
      break;
   case VKI_XENVER_pagesize:
      /* No outputs */
      break;
   case VKI_XENVER_guest_handle:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
      break;
   case VKI_XENVER_commandline:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
      break;
   }
}

POST(grant_table_op)
{
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)&gst->status, sizeof(gst->status));
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)gst->frame_list.p,
                    sizeof(*gst->frame_list.p) * gst->nr_frames);
      break;
   }
   }
}

POST(sysctl)
{
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      return;
   }

#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&sysctl->u._union._field,               \
                  sizeof(sysctl->u._union._field))
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
   __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)
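
/* The POST variants mirror the PRE_XEN_SYSCTL_READ macros above, but mark
   the named field as initialised (POST_MEM_WRITE) once the hypercall has
   returned, instead of checking that it was readable beforehand. */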

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
                     sysctl->u.readconsole.count * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
                        * sysctl->u.getdomaininfolist_00000008.num_domains);
         break;
      case 0x00000009:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
                        * sysctl->u.getdomaininfolist_00000009.num_domains);
         break;
      case 0x0000000a:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_sched_id:
      POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
      break;

   case VKI_XEN_SYSCTL_cpupool_op:
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
         POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
         POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
      }
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
      break;

   case VKI_XEN_SYSCTL_physinfo:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
      case 0x00000009: /* Unchanged from version 8 */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
         break;
      case 0x0000000a:
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_core.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_socket.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_node.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
      break;

   case VKI_XEN_SYSCTL_debug_keys:
      /* No outputs */
      break;
   }
#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
}

POST(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   switch (domctl->interface_version) {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
      break;
   default:
      return;
   }

#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,               \
                  sizeof(domctl->u._union._field));
#define POST_XEN_DOMCTL_WRITE(_domctl, _field) \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)
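
/* Same convention as the sysctl write macros: the getdomaininfo_00000007/8/9
   cases below pick the union layout that matches the domctl->interface_version
   checked at the top of this handler. */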

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */
      break;

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
      break;

   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_gettscinfo:
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally writes size... */
      __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
      /* ...but only writes to the buffer if it was non NULL */
      if ( domctl->u.hvmcontext.buffer.p )
         POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                        sizeof(*domctl->u.hvmcontext.buffer.p)
                        * domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
                           VKI_HVM_SAVE_LENGTH(CPU));
         break;
      }
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
                     domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getdomaininfo:
      switch (domctl->interface_version) {
      case 0x00000007:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
         break;
      case 0x00000008:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
         break;
      case 0x00000009:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                     domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, xfeature_mask);
      __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, size);
      POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                     domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
      switch(domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
         /* No outputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         POST_XEN_DOMCTL_WRITE(shadow_op, pages);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
         if(domctl->u.shadow_op.dirty_bitmap.p)
            POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                           domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
         break;
      }
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
      POST_XEN_DOMCTL_WRITE(mem_event_op, port);
      break;
   }
#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
}

POST(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field)  \
   POST_MEM_WRITE((Addr)&((_type*)arg)->_field,         \
                  sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
   __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
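
/* Unused-parameter note: _hvm_op is not referenced in the expansion of
   __POST_XEN_HVMOP_WRITE itself; it is only there so the two-argument
   POST_XEN_HVMOP_WRITE form can paste it into the vki_xen_hvm_<subop>_t
   type name, mirroring the PRE macros earlier in the file. */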

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
   case VKI_XEN_HVMOP_set_isa_irq_level:
   case VKI_XEN_HVMOP_set_pci_link_route:
   case VKI_XEN_HVMOP_set_mem_type:
   case VKI_XEN_HVMOP_set_mem_access:
   case VKI_XEN_HVMOP_inject_trap:
      /* No output parameters */
      break;

   case VKI_XEN_HVMOP_get_param:
      __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
      break;
   }
#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
}

POST(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   switch (tmem->cmd) {

   case VKI_XEN_TMEM_control:

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         break;
      }
      break;
   }
}

typedef
   struct {
      SyscallTableEntry entry;
      int nr_args;
   }
   XenHypercallTableEntry;

#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
#define HYPXY(const, name, nr_args)                     \
   [const] = { { vgSysWrap_xen_##name##_before,         \
                 vgSysWrap_xen_##name##_after },        \
               nr_args }
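
/* HYPX_ registers a hypercall that only needs a PRE wrapper; HYPXY also
   installs a POST wrapper.  Both rely on C99 designated initialisers, so
   e.g. HYPXY(__VKI_XEN_memory_op, memory_op, 2) expands to

      [__VKI_XEN_memory_op] = { { vgSysWrap_xen_memory_op_before,
                                  vgSysWrap_xen_memory_op_after }, 2 }

   placing the entry at index 12 of hypercall_table and leaving any
   unmentioned slots zero-initialised. */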

static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10

   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret, iret                                      // 23
   //    __VKI_XEN_vcpu_op, vcpu_op                                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   //    __VKI_XEN_xsm_op                                          // 27
   //    __VKI_XEN_nmi_op                                          // 28
   //    __VKI_XEN_sched_op                                        // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   //    __VKI_XEN_physdev_op                                      // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};
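
/* The table is sparse: hypercall numbers without a handler simply have a
   NULL 'before' pointer, and ML_(get_xen_hypercall_entry) below falls back
   to bad_hyper for those, which reports the unhandled hypercall and fails
   it with ENOSYS. */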

static void bad_before ( ThreadId              tid,
                         SyscallArgLayout*     layout,
                         /*MOD*/SyscallArgs*   args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord*         flags )
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
             VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

static XenHypercallTableEntry bad_hyper =
   { { bad_before, NULL }, 0 };

static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
{
   XenHypercallTableEntry *ret = &bad_hyper;

   const UInt hypercall_table_size
      = sizeof(hypercall_table) / sizeof(hypercall_table[0]);

   /* Is it in the contiguous initial section of the table? */
   if (sysno < hypercall_table_size) {
      XenHypercallTableEntry* ent = &hypercall_table[sysno];
      if (ent->entry.before != NULL)
         ret = ent;
   }

   /* Can't find a wrapper */
   return ret;
}

DEFN_PRE_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   vg_assert(ent->entry.before);
   (ent->entry.before)( tid, layout, arrghs, status, flags );
}

DEFN_POST_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   if (ent->entry.after)
      (ent->entry.after)( tid, arrghs, status );
}

#endif // defined(ENABLE_XEN)