xen: Add support for new sysctl and domctl interface versions
coregrind/m_syswrap/syswrap-xen.c
/*--------------------------------------------------------------------*/
/*--- Xen Hypercalls                                 syswrap-xen.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2012 Citrix Systems
      ian.campbell@citrix.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics.h"
#include "pub_core_vki.h"

#if defined(ENABLE_XEN)

#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-xen.h"

#include <inttypes.h>
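/* Wrappers for Xen hypercalls made by the client (for example via the
   Linux privcmd driver).  Each PRE handler describes which parts of a
   hypercall's argument structures the hypervisor will read, and each
   POST handler which parts it writes back, so that tools such as
   Memcheck can check the inputs and mark the outputs as initialised. */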
#define PRE(name)       static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name)      static DEFN_POST_TEMPLATE(xen, name)
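/* Helpers used by the wrappers below: report an unsupported interface
   version or an unhandled sub-operation, dump a stack trace at higher
   verbosity levels, and fail the hypercall with ENOSYS. */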
static void bad_intf_version ( ThreadId tid,
                               SyscallArgLayout* layout,
                               /*MOD*/SyscallArgs* args,
                               /*OUT*/SyscallStatus* status,
                               /*OUT*/UWord* flags,
                               const HChar* hypercall,
                               UWord version)
{
   VG_(dmsg)("WARNING: %s version %#lx not supported\n",
             hypercall, version);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

static void bad_subop ( ThreadId tid,
                        SyscallArgLayout* layout,
                        /*MOD*/SyscallArgs* args,
                        /*OUT*/SyscallStatus* status,
                        /*OUT*/UWord* flags,
                        const HChar* hypercall,
                        UWord subop)
{
   VG_(dmsg)("WARNING: unhandled %s subop: %ld\n",
             hypercall, subop);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
PRE(memory_op)
{
   PRINT("__HYPERVISOR_memory_op ( %ld, %lx )", ARG1, ARG2);

   switch (ARG1) {

   case VKI_XENMEM_maximum_ram_page:
      /* No inputs */
      break;

   case VKI_XENMEM_maximum_gpfn:
      PRE_MEM_READ("XENMEM_maximum_gpfn domid",
                   (Addr)ARG2, sizeof(vki_xen_domid_t));
      break;

   case VKI_XENMEM_machphys_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
                   (Addr)&arg->max_extents, sizeof(arg->max_extents));
      PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
                   (Addr)&arg->extent_start, sizeof(arg->extent_start));
      break;
   }

   case VKI_XENMEM_set_memory_map: {
      struct vki_xen_foreign_memory_map *arg =
         (struct vki_xen_foreign_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_set_memory_map domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_set_memory_map map",
                   (Addr)&arg->map, sizeof(arg->map));
      break;
   }

   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_populate_physmap:
   case VKI_XENMEM_claim_pages: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;
      const HChar *which;

      switch (ARG1) {
      case VKI_XENMEM_increase_reservation:
         which = "XENMEM_increase_reservation";
         break;
      case VKI_XENMEM_decrease_reservation:
         which = "XENMEM_decrease_reservation";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_populate_physmap:
         which = "XENMEM_populate_physmap";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_claim_pages:
         which = "XENMEM_claim_pages";
         break;
      default:
         which = "XENMEM_unknown";
         break;
      }

      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_start,
                   sizeof(memory_reservation->extent_start));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->nr_extents,
                   sizeof(memory_reservation->nr_extents));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_order,
                   sizeof(memory_reservation->extent_order));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->mem_flags,
                   sizeof(memory_reservation->mem_flags));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->domid,
                   sizeof(memory_reservation->domid));
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_add_to_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_add_to_physmap size",
                   (Addr)&arg->size, sizeof(arg->size));
      PRE_MEM_READ("XENMEM_add_to_physmap space",
                   (Addr)&arg->space, sizeof(arg->space));
      PRE_MEM_READ("XENMEM_add_to_physmap idx",
                   (Addr)&arg->idx, sizeof(arg->idx));
      PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_remove_from_physmap: {
      struct vki_xen_remove_from_physmap *arg =
         (struct vki_xen_remove_from_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_remove_from_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      break;

   case VKI_XENMEM_access_op: {
      struct vki_xen_mem_event_op *arg =
         (struct vki_xen_mem_event_op *)ARG2;
      PRE_MEM_READ("XENMEM_access_op domid",
                   (Addr)&arg->domain, sizeof(arg->domain));
      PRE_MEM_READ("XENMEM_access_op op",
                   (Addr)&arg->op, sizeof(arg->op));
      PRE_MEM_READ("XENMEM_access_op gfn",
                   (Addr)&arg->gfn, sizeof(arg->gfn));
      break;
   }

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_memory_op", ARG1);
      break;
   }
}
PRE(mmuext_op)
{
   struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
   unsigned int i, nr = ARG2;

   for (i=0; i<nr; i++) {
      struct vki_xen_mmuext_op *op = ops + i;
      PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
                   (Addr)&op->cmd, sizeof(op->cmd));
      switch(op->cmd) {
      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_COPY_PAGE:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
                      (Addr)&op->arg1.mfn,
                      sizeof(op->arg1.mfn));
         break;

      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.linear_addr",
                      (Addr)&op->arg1.linear_addr,
                      sizeof(op->arg1.linear_addr));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
         /* None */
         break;
      }

      switch(op->cmd) {
      case VKI_XEN_MMUEXT_SET_LDT:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
                      (Addr)&op->arg2.nr_ents,
                      sizeof(op->arg2.nr_ents));
         break;

      case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
      case VKI_XEN_MMUEXT_INVLPG_MULTI:
         /* How many??? */
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
                      (Addr)&op->arg2.vcpumask,
                      sizeof(op->arg2.vcpumask));
         break;

      case VKI_XEN_MMUEXT_COPY_PAGE:
         PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
                      (Addr)&op->arg2.src_mfn,
                      sizeof(op->arg2.src_mfn));
         break;

      case VKI_XEN_MMUEXT_PIN_L1_TABLE:
      case VKI_XEN_MMUEXT_PIN_L2_TABLE:
      case VKI_XEN_MMUEXT_PIN_L3_TABLE:
      case VKI_XEN_MMUEXT_PIN_L4_TABLE:
      case VKI_XEN_MMUEXT_UNPIN_TABLE:
      case VKI_XEN_MMUEXT_NEW_BASEPTR:
      case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
      case VKI_XEN_MMUEXT_INVLPG_LOCAL:
      case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
      case VKI_XEN_MMUEXT_INVLPG_ALL:
      case VKI_XEN_MMUEXT_FLUSH_CACHE:
      case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
      case VKI_XEN_MMUEXT_CLEAR_PAGE:
      case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
      case VKI_XEN_MMUEXT_MARK_SUPER:
      case VKI_XEN_MMUEXT_UNMARK_SUPER:
         /* None */
         break;
      }
   }
}
static void pre_evtchn_op(ThreadId tid,
                          SyscallArgLayout* layout,
                          /*MOD*/SyscallArgs* arrghs,
                          /*OUT*/SyscallStatus* status,
                          /*OUT*/UWord* flags,
                          __vki_u32 cmd, void *arg, int compat)
{
   PRINT("__HYPERVISOR_event_channel_op%s ( %d, %p )",
         compat ? "_compat" : "", cmd, arg);

   switch (cmd) {
   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
                   (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
      PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
                   (Addr)&alloc_unbound->remote_dom,
                   sizeof(alloc_unbound->remote_dom));
      break;
   }

   default:
      if ( compat )
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op_compat", cmd);
      else
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_event_channel_op", cmd);
      break;
   }
}

PRE(evtchn_op)
{
   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 ARG1, (void *)ARG2, 0);
}

PRE(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
                ARG1, sizeof(*evtchn));

   pre_evtchn_op(tid, layout, arrghs, status, flags,
                 evtchn->cmd, &evtchn->u, 1);
}

PRE(xen_version)
{
   PRINT("__HYPERVISOR_xen_version ( %ld, %lx )", ARG1, ARG2);

   switch (ARG1) {
   case VKI_XENVER_version:
   case VKI_XENVER_extraversion:
   case VKI_XENVER_compile_info:
   case VKI_XENVER_capabilities:
   case VKI_XENVER_changeset:
   case VKI_XENVER_platform_parameters:
   case VKI_XENVER_get_features:
   case VKI_XENVER_pagesize:
   case VKI_XENVER_guest_handle:
   case VKI_XENVER_commandline:
      /* No inputs */
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_xen_version", ARG1);
      break;
   }
}

PRE(grant_table_op)
{
   PRINT("__HYPERVISOR_grant_table_op ( %ld, 0x%lx, %ld )", ARG1, ARG2, ARG3);
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
                   (Addr)&gst->dom, sizeof(gst->dom));
      PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
                   (Addr)&gst->nr_frames, sizeof(gst->nr_frames));
      break;
   }

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_grant_table_op", ARG1);
      break;
   }
}
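/* The sysctl and domctl argument layouts are versioned: the wrappers
   below accept the interface versions they know about (0x00000008 to
   0x0000000b for sysctl, 0x00000007 to 0x0000000a for domctl) and use
   version-specific union members, e.g. getdomaininfolist_0000000a,
   wherever the layout differs between versions. */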
PRE(sysctl) {
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);

   /*
    * Common part of xen_sysctl:
    *    uint32_t cmd;
    *    uint32_t interface_version;
    */
   PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t));

   if (!sysctl)
      return;

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_sysctl", sysctl->interface_version);
      return;
   }

#define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field)                  \
   PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field,       \
                (Addr)&sysctl->u._union._field,                         \
                sizeof(sysctl->u._union._field))
#define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
   __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
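/* For illustration, the statement PRE_XEN_SYSCTL_READ(readconsole, clear);
 * expands to
 *    PRE_MEM_READ("XEN_SYSCTL_readconsole u.readconsole.clear",
 *                 (Addr)&sysctl->u.readconsole.clear,
 *                 sizeof(sysctl->u.readconsole.clear));
 * i.e. it marks a single field of the relevant union member as read. */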
   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      /* These are all unconditionally read */
      PRE_XEN_SYSCTL_READ(readconsole, clear);
      PRE_XEN_SYSCTL_READ(readconsole, incremental);
      PRE_XEN_SYSCTL_READ(readconsole, buffer);
      PRE_XEN_SYSCTL_READ(readconsole, count);

      /* 'index' only read if 'incremental' is nonzero */
      if (sysctl->u.readconsole.incremental)
         PRE_XEN_SYSCTL_READ(readconsole, index);
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
         break;
      case 0x00000009:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
         break;
      case 0x0000000a:
      case 0x0000000b:
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
         PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
         break;
      default:
         VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
                   "%"PRIx32" not implemented yet\n",
                   sysctl->interface_version);
         SET_STATUS_Failure(VKI_EINVAL);
         return;
      }
      break;

   case VKI_XEN_SYSCTL_debug_keys:
      PRE_XEN_SYSCTL_READ(debug_keys, keys);
      PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
      PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
                   (Addr)sysctl->u.debug_keys.keys.p,
                   sysctl->u.debug_keys.nr_keys * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_sched_id:
      /* No inputs */
      break;

   case VKI_XEN_SYSCTL_cpupool_op:
      PRE_XEN_SYSCTL_READ(cpupool_op, op);

      switch(sysctl->u.cpupool_op.op) {
      case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
      case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
         PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
      }

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
         PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
         PRE_XEN_SYSCTL_READ(cpupool_op, domid);

      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
         PRE_XEN_SYSCTL_READ(cpupool_op, cpu);

      break;

   case VKI_XEN_SYSCTL_physinfo:
      /* No input params */
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
      PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
      PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_sysctl", sysctl->cmd);
      break;
   }
#undef PRE_XEN_SYSCTL_READ
#undef __PRE_XEN_SYSCTL_READ
}
PRE(domctl)
{
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   PRINT("__HYPERVISOR_domctl ( %d ) on dom%d", domctl->cmd, domctl->domain);

   /*
    * Common part of xen_domctl:
    *    vki_uint32_t cmd;
    *    vki_uint32_t interface_version;
    *    vki_xen_domid_t domain;
    */
   PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
                sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
                + sizeof(vki_xen_domid_t));

   if (!domctl)
      return;

   switch (domctl->interface_version)
   {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      bad_intf_version(tid, layout, arrghs, status, flags,
                       "__HYPERVISOR_domctl", domctl->interface_version);
      return;
   }

#define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field)            \
   PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field, \
                (Addr)&domctl->u._union._field,                   \
                sizeof(domctl->u._union._field))
#define PRE_XEN_DOMCTL_READ(_domctl, _field) \
   __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
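/* As for the sysctl macros above: PRE_XEN_DOMCTL_READ(x, f) checks u.x.f,
 * while the three-argument __PRE_XEN_DOMCTL_READ form is used when the
 * union member name differs from the command name (for example,
 * XEN_DOMCTL_gethvmcontext uses the u.hvmcontext member). */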
   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_vcpus:
   case VKI_XEN_DOMCTL_get_address_size:
   case VKI_XEN_DOMCTL_gettscinfo:
   case VKI_XEN_DOMCTL_getdomaininfo:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_resumedomain:
      /* No input fields. */
      break;

   case VKI_XEN_DOMCTL_createdomain:
      PRE_XEN_DOMCTL_READ(createdomain, ssidref);
      PRE_XEN_DOMCTL_READ(createdomain, handle);
      PRE_XEN_DOMCTL_READ(createdomain, flags);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally reads the 'buffer' pointer */
      __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
      /* Xen only consumes 'size' if 'buffer' is non NULL.  A NULL
       * buffer is a request for the required size. */
      if ( domctl->u.hvmcontext.buffer.p )
         __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
      break;

   case VKI_XEN_DOMCTL_sethvmcontext:
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
      __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
      PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
                   (Addr)domctl->u.hvmcontext.buffer.p,
                   domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
      __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);

      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
                          (Addr)domctl->u.hvmcontext_partial.buffer.p,
                          VKI_HVM_SAVE_LENGTH(CPU));
         break;
      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl_gethvmcontext_partial type",
                   domctl->u.hvmcontext_partial.type);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_max_mem:
      PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
      break;

   case VKI_XEN_DOMCTL_set_address_size:
      __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_settscinfo:
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
      __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_ioport_permission:
      PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
      PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
      PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);
      break;

   case VKI_XEN_DOMCTL_hypercall_init:
      PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
      break;

   case VKI_XEN_DOMCTL_settimeoffset:
      PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
      PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity, vcpu);
      break;

   case VKI_XEN_DOMCTL_setvcpuaffinity:
      __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
      PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
                   (Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
                   domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      break;
   case VKI_XEN_DOMCTL_setnodeaffinity:
      __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
      PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.nodemap.bitmap",
                   (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                   domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getvcpucontext:
      __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
      break;

   case VKI_XEN_DOMCTL_setvcpucontext:
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
      __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_set_cpuid:
      PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
                   (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
      PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
      PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
                   (Addr)domctl->u.getpageframeinfo3.array.p,
                   domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
      __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
      PRE_XEN_DOMCTL_READ(shadow_op, op);

      switch(domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
         /* No further inputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
         PRE_XEN_DOMCTL_READ(shadow_op, mode);
         switch(domctl->u.shadow_op.mode)
         {
         case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
            goto domctl_shadow_op_enable_logdirty;

         default:
            bad_subop(tid, layout, arrghs, status, flags,
                      "__HYPERVISOR_domctl shadowop mode",
                      domctl->u.shadow_op.mode);
            break;
         }

      case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
      domctl_shadow_op_enable_logdirty:
         /* No further inputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
         PRE_XEN_DOMCTL_READ(shadow_op, pages);
         break;

      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_domctl shadow(10)",
                   domctl->u.shadow_op.op);
         break;
      }
      break;

   case VKI_XEN_DOMCTL_set_max_evtchn:
      PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);
      break;

   case VKI_XEN_DOMCTL_cacheflush:
      PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
      PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
      break;

   case VKI_XEN_DOMCTL_set_access_required:
      PRE_XEN_DOMCTL_READ(access_required, access_required);
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
      PRE_XEN_DOMCTL_READ(mem_event_op, op);
      PRE_XEN_DOMCTL_READ(mem_event_op, mode);
      break;

   case VKI_XEN_DOMCTL_debug_op:
      PRE_XEN_DOMCTL_READ(debug_op, op);
      PRE_XEN_DOMCTL_READ(debug_op, vcpu);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_domctl", domctl->cmd);
      break;
   }
#undef PRE_XEN_DOMCTL_READ
#undef __PRE_XEN_DOMCTL_READ
}
PRE(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);

#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
#define PRE_XEN_HVMOP_READ(_hvm_op, _field)                             \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
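/* Here the argument is an anonymous pointer, so the macros cast it to the
 * relevant structure type.  For example, PRE_XEN_HVMOP_READ(set_mem_type, nr);
 * expands to
 *    PRE_MEM_READ("XEN_HVMOP_set_mem_type nr",
 *                 (Addr)&((vki_xen_hvm_set_mem_type_t*)arg)->nr,
 *                 sizeof(((vki_xen_hvm_set_mem_type_t*)arg)->nr));
 */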
   switch (op) {
   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_param:
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
      break;

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);
      break;

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);
      break;

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
      break;

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* if default access */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));
      break;

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);
      break;
   }
#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
}
PRE(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   PRINT("__HYPERVISOR_tmem_op ( %d )", tmem->cmd);

   /* Common part for xen_tmem_op:
    *    vki_uint32_t cmd;
    */
   PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));

#define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field)              \
   PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field,  \
                (Addr)&tmem->u._union._field,                     \
                sizeof(tmem->u._union._field))
#define PRE_XEN_TMEMOP_READ(_tmem, _field) \
   __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)

   switch(tmem->cmd) {

   case VKI_XEN_TMEM_control:

      /* Common part for control hypercall:
       *    vki_int32_t pool_id;
       *    vki_uint32_t subop;
       */
      PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                   (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
      PRE_XEN_TMEMOP_READ(ctrl, subop);

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         PRE_XEN_TMEMOP_READ(ctrl, cli_id);
         PRE_XEN_TMEMOP_READ(ctrl, arg1);
         PRE_XEN_TMEMOP_READ(ctrl, buf);
         break;

      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);
      }

      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_tmem_op", ARG1);
   }

#undef PRE_XEN_TMEMOP_READ
#undef __PRE_XEN_TMEMOP_READ
}
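/* POST wrappers: these run once the hypercall has returned successfully
   and use POST_MEM_WRITE to tell the tool which parts of the argument
   structures the hypervisor has filled in, so that their contents are
   treated as initialised from here on. */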
POST(memory_op)
{
   switch (ARG1) {
   case VKI_XENMEM_maximum_ram_page:
   case VKI_XENMEM_set_memory_map:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_claim_pages:
   case VKI_XENMEM_maximum_gpfn:
   case VKI_XENMEM_remove_from_physmap:
   case VKI_XENMEM_access_op:
      /* No outputs */
      break;
   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_populate_physmap: {
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;

      POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
                     sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
      break;
   }

   case VKI_XENMEM_machphys_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
      POST_MEM_WRITE((Addr)arg->extent_start.p,
                     sizeof(vki_xen_pfn_t) * arg->nr_extents);
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      if (arg->space == VKI_XENMAPSPACE_gmfn_range)
         POST_MEM_WRITE(ARG2, sizeof(*arg));
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      /* No outputs */
      break;
   }
}

POST(mmuext_op)
{
   unsigned int *pdone = (unsigned int *)ARG3;
   /* simplistic */
   POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
}

static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
{
   switch (cmd) {
   case VKI_XEN_EVTCHNOP_alloc_unbound: {
      struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
      POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
      break;
   }
   }
}

POST(evtchn_op)
{
   post_evtchn_op(tid, ARG1, (void *)ARG2, 0);
}

POST(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
}

POST(xen_version)
{
   switch (ARG1) {
   case VKI_XENVER_version:
      /* No outputs */
      break;
   case VKI_XENVER_extraversion:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
      break;
   case VKI_XENVER_compile_info:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
      break;
   case VKI_XENVER_capabilities:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
      break;
   case VKI_XENVER_changeset:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
      break;
   case VKI_XENVER_platform_parameters:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
      break;
   case VKI_XENVER_get_features:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
      break;
   case VKI_XENVER_pagesize:
      /* No outputs */
      break;
   case VKI_XENVER_guest_handle:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
      break;
   case VKI_XENVER_commandline:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
      break;
   }
}
POST(grant_table_op)
{
   switch (ARG1) {
   case VKI_XEN_GNTTABOP_setup_table: {
      struct vki_xen_gnttab_setup_table *gst =
         (struct vki_xen_gnttab_setup_table*)ARG2;
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)&gst->status, sizeof(gst->status));
      PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
                    (Addr)gst->frame_list.p,
                    sizeof(*gst->frame_list.p) * gst->nr_frames);
      break;
   }
   }
}
POST(sysctl)
{
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      return;
   }

#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field)  \
   POST_MEM_WRITE((Addr)&sysctl->u._union._field,         \
                  sizeof(sysctl->u._union._field))
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
   __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
                     sysctl->u.readconsole.count * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
                        * sysctl->u.getdomaininfolist_00000008.num_domains);
         break;
      case 0x00000009:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
                        * sysctl->u.getdomaininfolist_00000009.num_domains);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_sched_id:
      POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
      break;

   case VKI_XEN_SYSCTL_cpupool_op:
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
         POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
         POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
      }
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
      break;

   case VKI_XEN_SYSCTL_physinfo:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
      case 0x00000009: /* Unchanged from version 8 */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_core.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_socket.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_node.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
      break;

   /* No outputs */
   case VKI_XEN_SYSCTL_debug_keys:
      break;
   }
#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
}
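/* POST(domctl) mirrors PRE(domctl): unknown interface versions were already
   rejected in the PRE handler, so they are simply ignored here, and
   version-specific layouts such as getdomaininfo_00000007/8/9 are used
   where the structure changed between versions. */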
POST(domctl){
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   switch (domctl->interface_version) {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      return;
   }

#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)  \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,         \
                  sizeof(domctl->u._union._field));
#define POST_XEN_DOMCTL_WRITE(_domctl, _field) \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */
      break;

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
      break;

   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_gettscinfo:
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally writes size... */
      __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
      /* ...but only writes to the buffer if it was non NULL */
      if ( domctl->u.hvmcontext.buffer.p )
         POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                        sizeof(*domctl->u.hvmcontext.buffer.p)
                        * domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
                           VKI_HVM_SAVE_LENGTH(CPU));
         break;
      }
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
                     domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getdomaininfo:
      switch (domctl->interface_version) {
      case 0x00000007:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
         break;
      case 0x00000008:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
         break;
      case 0x00000009:
      case 0x0000000a:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                     domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, xfeature_mask);
      __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, size);
      POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                     domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
      switch(domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
         /* No outputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         POST_XEN_DOMCTL_WRITE(shadow_op, pages);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
         if(domctl->u.shadow_op.dirty_bitmap.p)
            POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                           domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
         break;

      default:
         break;
      }
      break;
   case VKI_XEN_DOMCTL_mem_event_op:
      POST_XEN_DOMCTL_WRITE(mem_event_op, port);

      break;
   }
#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
}
POST(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field)  \
   POST_MEM_WRITE((Addr)&((_type*)arg)->_field,         \
                  sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
   __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
   case VKI_XEN_HVMOP_set_isa_irq_level:
   case VKI_XEN_HVMOP_set_pci_link_route:
   case VKI_XEN_HVMOP_set_mem_type:
   case VKI_XEN_HVMOP_set_mem_access:
   case VKI_XEN_HVMOP_inject_trap:
      /* No output parameters */
      break;

   case VKI_XEN_HVMOP_get_param:
      __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
      break;
   }
#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
}
POST(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   switch(tmem->cmd) {

   case VKI_XEN_TMEM_control:

      switch(tmem->u.ctrl.subop) {
      /* No outputs */
      case VKI_XEN_TMEMC_save_begin:
         break;
      }

      break;
   }
}
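/* Table mapping hypercall numbers to their wrappers.  Each entry pairs a
   PRE handler with an optional POST handler and records how many argument
   registers the hypercall uses.  HYPX_ registers a PRE-only wrapper and
   HYPXY registers both; hypercalls without an entry are handled by
   bad_before() below, which reports them and fails with ENOSYS. */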
typedef
   struct {
      SyscallTableEntry entry;
      int nr_args;
   }
   XenHypercallTableEntry;

#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
#define HYPXY(const, name, nr_args)              \
   [const] = { { vgSysWrap_xen_##name##_before,  \
                 vgSysWrap_xen_##name##_after }, \
               nr_args }

static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10
   //                                                              // 11
   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret, iret                                      // 23
   //    __VKI_XEN_vcpu_op, vcpu_op                                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   //    __VKI_XEN_xsm_op                                          // 27
   //    __VKI_XEN_nmi_op                                          // 28
   //    __VKI_XEN_sched_op                                        // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   //    __VKI_XEN_physdev_op                                      // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};

static void bad_before ( ThreadId tid,
                         SyscallArgLayout* layout,
                         /*MOD*/SyscallArgs* args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord* flags )
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
             VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}

static XenHypercallTableEntry bad_hyper =
{ { bad_before, NULL }, 0 };

static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
{
   XenHypercallTableEntry *ret = &bad_hyper;

   const UInt hypercall_table_size
      = sizeof(hypercall_table) / sizeof(hypercall_table[0]);

   /* Is it in the contiguous initial section of the table? */
   if (sysno < hypercall_table_size) {
      XenHypercallTableEntry* ent = &hypercall_table[sysno];
      if (ent->entry.before != NULL)
         ret = ent;
   }

   /* Can't find a wrapper */
   return ret;
}
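/* Top-level PRE/POST entry points for the Xen hypercall family.  They look
   up the per-hypercall wrapper for SYSNO and pass the number of argument
   registers the hypercall consumes back to the caller in ARG8. */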
DEFN_PRE_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   vg_assert(ent);
   vg_assert(ent->entry.before);
   (ent->entry.before)( tid, layout, arrghs, status, flags );
}

DEFN_POST_TEMPLATE(xen, hypercall)
{
   XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);

   /* Return number of arguments consumed */
   ARG8 = ent->nr_args;

   vg_assert(ent);
   if (ent->entry.after)
      (ent->entry.after)( tid, arrghs, status );
}

#endif // defined(ENABLE_XEN)