/* xen: Basic syswrap infrastructure for XEN hypercalls.
   (coregrind/m_syswrap/syswrap-xen.c) */
2 /*--------------------------------------------------------------------*/
3 /*--- Xen Hypercalls syswrap-xen.c ---*/
4 /*--------------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2012 Citrix Systems
11 ian.campbell@citrix.com
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
   The GNU General Public License is contained in the file COPYING.
*/
31 #include "pub_core_basics.h"
32 #include "pub_core_vki.h"
34 #if defined(ENABLE_XEN)
36 #include "pub_core_vkiscnums.h"
37 #include "pub_core_threadstate.h"
38 #include "pub_core_aspacemgr.h"
39 #include "pub_core_debuginfo.h" // VG_(di_notify_*)
40 #include "pub_core_transtab.h" // VG_(discard_translations)
41 #include "pub_core_xarray.h"
42 #include "pub_core_clientstate.h"
43 #include "pub_core_debuglog.h"
44 #include "pub_core_libcbase.h"
45 #include "pub_core_libcassert.h"
46 #include "pub_core_libcfile.h"
47 #include "pub_core_libcprint.h"
48 #include "pub_core_libcproc.h"
49 #include "pub_core_libcsignal.h"
50 #include "pub_core_mallocfree.h"
51 #include "pub_core_tooliface.h"
52 #include "pub_core_options.h"
53 #include "pub_core_scheduler.h"
54 #include "pub_core_signals.h"
55 #include "pub_core_syscall.h"
56 #include "pub_core_syswrap.h"
57 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
59 #include "priv_types_n_macros.h"
60 #include "priv_syswrap-generic.h"
61 #include "priv_syswrap-xen.h"
63 #include <inttypes.h>
65 #define PRE(name) static DEFN_PRE_TEMPLATE(xen, name)
66 #define POST(name) static DEFN_POST_TEMPLATE(xen, name)
68 static void bad_intf_version ( ThreadId tid,
69 SyscallArgLayout* layout,
70 /*MOD*/SyscallArgs* args,
71 /*OUT*/SyscallStatus* status,
72 /*OUT*/UWord* flags,
73 const HChar* hypercall,
74 UWord version)
76 VG_(dmsg)("WARNING: %s version %#lx not supported\n",
77 hypercall, version);
78 if (VG_(clo_verbosity) > 1) {
79 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
81 VG_(dmsg)("You may be able to write your own handler.\n");
82 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
83 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
84 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
85 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
87 SET_STATUS_Failure(VKI_ENOSYS);
90 static void bad_subop ( ThreadId tid,
91 SyscallArgLayout* layout,
92 /*MOD*/SyscallArgs* args,
93 /*OUT*/SyscallStatus* status,
94 /*OUT*/UWord* flags,
95 const HChar* hypercall,
96 UWord subop)
98 VG_(dmsg)("WARNING: unhandled %s subop: %ld\n",
99 hypercall, subop);
100 if (VG_(clo_verbosity) > 1) {
101 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
103 VG_(dmsg)("You may be able to write your own handler.\n");
104 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
105 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
106 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
107 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
109 SET_STATUS_Failure(VKI_ENOSYS);
112 PRE(memory_op)
114 PRINT("__HYPERVISOR_memory_op ( %ld, %lx )", ARG1, ARG2);
116 switch (ARG1) {
118 case VKI_XENMEM_maximum_ram_page:
119 /* No inputs */
120 break;
122 case VKI_XENMEM_maximum_gpfn:
123 PRE_MEM_READ("XENMEM_maximum_gpfn domid",
124 (Addr)ARG2, sizeof(vki_xen_domid_t));
125 break;
127 case VKI_XENMEM_machphys_mfn_list:
128 case VKI_XENMEM_machphys_compat_mfn_list: {
129 struct vki_xen_machphys_mfn_list *arg =
130 (struct vki_xen_machphys_mfn_list *)ARG2;
131 PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
132 (Addr)&arg->max_extents, sizeof(arg->max_extents));
133 PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
134 (Addr)&arg->extent_start, sizeof(arg->extent_start));
135 break;
138 case VKI_XENMEM_set_memory_map: {
139 struct vki_xen_foreign_memory_map *arg =
140 (struct vki_xen_foreign_memory_map *)ARG2;
141 PRE_MEM_READ("XENMEM_set_memory_map domid",
142 (Addr)&arg->domid, sizeof(arg->domid));
143 PRE_MEM_READ("XENMEM_set_memory_map map",
144 (Addr)&arg->map, sizeof(arg->map));
145 break;
148 case VKI_XENMEM_memory_map:
149 case VKI_XENMEM_machine_memory_map: {
150 struct vki_xen_memory_map *arg =
151 (struct vki_xen_memory_map *)ARG2;
152 PRE_MEM_READ("XENMEM_memory_map nr_entries",
153 (Addr)&arg->nr_entries, sizeof(arg->nr_entries));
154 break;
157 case VKI_XENMEM_increase_reservation:
158 case VKI_XENMEM_decrease_reservation:
159 case VKI_XENMEM_populate_physmap:
160 case VKI_XENMEM_claim_pages: {
161 struct xen_memory_reservation *memory_reservation =
162 (struct xen_memory_reservation *)ARG2;
163 const HChar *which;
165 switch (ARG1) {
166 case VKI_XENMEM_increase_reservation:
167 which = "XENMEM_increase_reservation";
168 break;
169 case VKI_XENMEM_decrease_reservation:
170 which = "XENMEM_decrease_reservation";
171 PRE_MEM_READ(which,
172 (Addr)memory_reservation->extent_start.p,
173 sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
174 break;
175 case VKI_XENMEM_populate_physmap:
176 which = "XENMEM_populate_physmap";
177 PRE_MEM_READ(which,
178 (Addr)memory_reservation->extent_start.p,
179 sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
180 break;
181 case VKI_XENMEM_claim_pages:
182 which = "XENMEM_claim_pages";
183 break;
184 default:
185 which = "XENMEM_unknown";
186 break;
189 PRE_MEM_READ(which,
190 (Addr)&memory_reservation->extent_start,
191 sizeof(memory_reservation->extent_start));
192 PRE_MEM_READ(which,
193 (Addr)&memory_reservation->nr_extents,
194 sizeof(memory_reservation->nr_extents));
195 PRE_MEM_READ(which,
196 (Addr)&memory_reservation->extent_order,
197 sizeof(memory_reservation->extent_order));
198 PRE_MEM_READ(which,
199 (Addr)&memory_reservation->mem_flags,
200 sizeof(memory_reservation->mem_flags));
201 PRE_MEM_READ(which,
202 (Addr)&memory_reservation->domid,
203 sizeof(memory_reservation->domid));
204 break;
207 case VKI_XENMEM_add_to_physmap: {
208 struct vki_xen_add_to_physmap *arg =
209 (struct vki_xen_add_to_physmap *)ARG2;
210 PRE_MEM_READ("XENMEM_add_to_physmap domid",
211 (Addr)&arg->domid, sizeof(arg->domid));
212 PRE_MEM_READ("XENMEM_add_to_physmap size",
213 (Addr)&arg->size, sizeof(arg->size));
214 PRE_MEM_READ("XENMEM_add_to_physmap space",
215 (Addr)&arg->space, sizeof(arg->space));
216 PRE_MEM_READ("XENMEM_add_to_physmap idx",
217 (Addr)&arg->idx, sizeof(arg->idx));
218 PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
219 (Addr)&arg->gpfn, sizeof(arg->gpfn));
220 break;
223 case VKI_XENMEM_remove_from_physmap: {
224 struct vki_xen_remove_from_physmap *arg =
225 (struct vki_xen_remove_from_physmap *)ARG2;
226 PRE_MEM_READ("XENMEM_remove_from_physmap domid",
227 (Addr)&arg->domid, sizeof(arg->domid));
228 PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
229 (Addr)&arg->gpfn, sizeof(arg->gpfn));
230 break;
233 case VKI_XENMEM_get_sharing_freed_pages:
234 case VKI_XENMEM_get_sharing_shared_pages:
235 break;
237 case VKI_XENMEM_access_op: {
238 struct vki_xen_mem_event_op *arg =
239 (struct vki_xen_mem_event_op *)ARG2;
240 PRE_MEM_READ("XENMEM_access_op domid",
241 (Addr)&arg->domain, sizeof(arg->domain));
242 PRE_MEM_READ("XENMEM_access_op op",
243 (Addr)&arg->op, sizeof(arg->op));
244 PRE_MEM_READ("XENMEM_access_op gfn",
245 (Addr)&arg->gfn, sizeof(arg->gfn));
246 break;
248 default:
249 bad_subop(tid, layout, arrghs, status, flags,
250 "__HYPERVISOR_memory_op", ARG1);
251 break;
255 PRE(mmuext_op)
257 struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
258 unsigned int i, nr = ARG2;
260 for (i=0; i<nr; i++) {
261 struct vki_xen_mmuext_op *op = ops + i;
262 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
263 (Addr)&op->cmd, sizeof(op->cmd));
264 switch(op->cmd) {
265 case VKI_XEN_MMUEXT_PIN_L1_TABLE:
266 case VKI_XEN_MMUEXT_PIN_L2_TABLE:
267 case VKI_XEN_MMUEXT_PIN_L3_TABLE:
268 case VKI_XEN_MMUEXT_PIN_L4_TABLE:
269 case VKI_XEN_MMUEXT_UNPIN_TABLE:
270 case VKI_XEN_MMUEXT_NEW_BASEPTR:
271 case VKI_XEN_MMUEXT_CLEAR_PAGE:
272 case VKI_XEN_MMUEXT_COPY_PAGE:
273 case VKI_XEN_MMUEXT_MARK_SUPER:
274 case VKI_XEN_MMUEXT_UNMARK_SUPER:
275 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
276 (Addr)&op->arg1.mfn,
277 sizeof(op->arg1.mfn));
278 break;
280 case VKI_XEN_MMUEXT_INVLPG_LOCAL:
281 case VKI_XEN_MMUEXT_INVLPG_ALL:
282 case VKI_XEN_MMUEXT_SET_LDT:
283 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
284 (Addr)&op->arg1.linear_addr,
285 sizeof(op->arg1.linear_addr));
286 break;
288 case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
289 case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
290 case VKI_XEN_MMUEXT_INVLPG_MULTI:
291 case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
292 case VKI_XEN_MMUEXT_FLUSH_CACHE:
293 case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
294 case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
295 /* None */
296 break;
299 switch(op->cmd) {
300 case VKI_XEN_MMUEXT_SET_LDT:
301 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
302 (Addr)&op->arg2.nr_ents,
303 sizeof(op->arg2.nr_ents));
304 break;
306 case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
307 case VKI_XEN_MMUEXT_INVLPG_MULTI:
308 /* How many??? */
309 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
310 (Addr)&op->arg2.vcpumask,
311 sizeof(op->arg2.vcpumask));
312 break;
314 case VKI_XEN_MMUEXT_COPY_PAGE:
315 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
316 (Addr)&op->arg2.src_mfn,
317 sizeof(op->arg2.src_mfn));
318 break;
320 case VKI_XEN_MMUEXT_PIN_L1_TABLE:
321 case VKI_XEN_MMUEXT_PIN_L2_TABLE:
322 case VKI_XEN_MMUEXT_PIN_L3_TABLE:
323 case VKI_XEN_MMUEXT_PIN_L4_TABLE:
324 case VKI_XEN_MMUEXT_UNPIN_TABLE:
325 case VKI_XEN_MMUEXT_NEW_BASEPTR:
326 case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
327 case VKI_XEN_MMUEXT_INVLPG_LOCAL:
328 case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
329 case VKI_XEN_MMUEXT_INVLPG_ALL:
330 case VKI_XEN_MMUEXT_FLUSH_CACHE:
331 case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
332 case VKI_XEN_MMUEXT_CLEAR_PAGE:
333 case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
334 case VKI_XEN_MMUEXT_MARK_SUPER:
335 case VKI_XEN_MMUEXT_UNMARK_SUPER:
336 /* None */
337 break;
342 PRE(sched_op)
344 PRINT("__HYPERVISOR_sched_op ( %ld, %lx )", ARG1, ARG2);
345 void *arg = (void *)(unsigned long)ARG2;
347 #define __PRE_XEN_SCHEDOP_READ(_schedop, _type, _field) \
348 PRE_MEM_READ("XEN_SCHEDOP_" # _schedop " " #_field, \
349 (Addr)&((_type*)arg)->_field, \
350 sizeof(((_type*)arg)->_field))
351 #define PRE_XEN_SCHEDOP_READ(_schedop, _field) \
352 __PRE_XEN_SCHEDOP_READ(_schedop, vki_xen_ ## _schedop ## _t, _field)
354 switch (ARG1) {
356 default:
357 bad_subop(tid, layout, arrghs, status, flags,
358 "__HYPERVISOR_sched_op", ARG1);
359 break;
361 #undef __PRE_XEN_SCHEDOP_READ
362 #undef PRE_XEN_SCHEDOP_READ
365 static void pre_evtchn_op(ThreadId tid,
366 SyscallArgLayout* layout,
367 /*MOD*/SyscallArgs* arrghs,
368 /*OUT*/SyscallStatus* status,
369 /*OUT*/UWord* flags,
370 __vki_u32 cmd, void *arg, int compat)
372 PRINT("__HYPERVISOR_event_channel_op%s ( %d, %p )",
373 compat ? "_compat" : "", cmd, arg);
375 switch (cmd) {
376 case VKI_XEN_EVTCHNOP_alloc_unbound: {
377 struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
378 PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
379 (Addr)&alloc_unbound->dom, sizeof(alloc_unbound->dom));
380 PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
381 (Addr)&alloc_unbound->remote_dom,
382 sizeof(alloc_unbound->remote_dom));
383 break;
385 default:
386 if ( compat )
387 bad_subop(tid, layout, arrghs, status, flags,
388 "__HYPERVISOR_event_channel_op_compat", cmd);
389 else
390 bad_subop(tid, layout, arrghs, status, flags,
391 "__HYPERVISOR_event_channel_op", cmd);
392 break;
396 PRE(evtchn_op)
398 pre_evtchn_op(tid, layout, arrghs, status, flags,
399 ARG1, (void *)ARG2, 0);
402 PRE(evtchn_op_compat)
404 struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
405 PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
406 ARG1, sizeof(*evtchn));
408 pre_evtchn_op(tid, layout, arrghs, status, flags,
409 evtchn->cmd, &evtchn->u, 1);
412 PRE(xen_version)
414 PRINT("__HYPERVISOR_xen_version ( %ld, %lx )", ARG1, ARG2);
416 switch (ARG1) {
417 case VKI_XENVER_version:
418 case VKI_XENVER_extraversion:
419 case VKI_XENVER_compile_info:
420 case VKI_XENVER_capabilities:
421 case VKI_XENVER_changeset:
422 case VKI_XENVER_platform_parameters:
423 case VKI_XENVER_get_features:
424 case VKI_XENVER_pagesize:
425 case VKI_XENVER_guest_handle:
426 case VKI_XENVER_commandline:
427 /* No inputs */
428 break;
430 default:
431 bad_subop(tid, layout, arrghs, status, flags,
432 "__HYPERVISOR_xen_version", ARG1);
433 break;
437 PRE(grant_table_op)
439 PRINT("__HYPERVISOR_grant_table_op ( %ld, 0x%lx, %ld )", ARG1, ARG2, ARG3);
440 switch (ARG1) {
441 case VKI_XEN_GNTTABOP_setup_table: {
442 struct vki_xen_gnttab_setup_table *gst =
443 (struct vki_xen_gnttab_setup_table*)ARG2;
444 PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
445 (Addr)&gst->dom, sizeof(gst->dom));
446 PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
447 (Addr)&gst->nr_frames, sizeof(gst->nr_frames));
448 break;
450 default:
451 bad_subop(tid, layout, arrghs, status, flags,
452 "__HYPERVISOR_grant_table_op", ARG1);
453 break;
457 PRE(sysctl) {
458 struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;
460 PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);
463 * Common part of xen_sysctl:
464 * uint32_t cmd;
465 * uint32_t interface_version;
467 PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
468 sizeof(vki_uint32_t) + sizeof(vki_uint32_t));
470 if (!sysctl)
471 return;
473 switch (sysctl->interface_version)
475 case 0x00000008:
476 case 0x00000009:
477 case 0x0000000a:
478 case 0x0000000b:
479 break;
480 default:
481 bad_intf_version(tid, layout, arrghs, status, flags,
482 "__HYPERVISOR_sysctl", sysctl->interface_version);
483 return;
486 #define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field) \
487 PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field, \
488 (Addr)&sysctl->u._union._field, \
489 sizeof(sysctl->u._union._field))
490 #define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
491 __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
493 switch (sysctl->cmd) {
494 case VKI_XEN_SYSCTL_readconsole:
495 /* These are all unconditionally read */
496 PRE_XEN_SYSCTL_READ(readconsole, clear);
497 PRE_XEN_SYSCTL_READ(readconsole, incremental);
498 PRE_XEN_SYSCTL_READ(readconsole, buffer);
499 PRE_XEN_SYSCTL_READ(readconsole, count);
501 /* 'index' only read if 'incremental' is nonzero */
502 if (sysctl->u.readconsole.incremental)
503 PRE_XEN_SYSCTL_READ(readconsole, index);
504 break;
506 case VKI_XEN_SYSCTL_getdomaininfolist:
507 switch (sysctl->interface_version)
509 case 0x00000008:
510 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
511 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
512 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
513 break;
514 case 0x00000009:
515 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
516 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
517 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
518 break;
519 case 0x0000000a:
520 case 0x0000000b:
521 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
522 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
523 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
524 break;
525 default:
526 VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
527 "%"PRIx32" not implemented yet\n",
528 sysctl->interface_version);
529 SET_STATUS_Failure(VKI_EINVAL);
530 return;
532 break;
534 case VKI_XEN_SYSCTL_debug_keys:
535 PRE_XEN_SYSCTL_READ(debug_keys, keys);
536 PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
537 PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
538 (Addr)sysctl->u.debug_keys.keys.p,
539 sysctl->u.debug_keys.nr_keys * sizeof(char));
540 break;
542 case VKI_XEN_SYSCTL_sched_id:
543 /* No inputs */
544 break;
546 case VKI_XEN_SYSCTL_cpupool_op:
547 PRE_XEN_SYSCTL_READ(cpupool_op, op);
549 switch(sysctl->u.cpupool_op.op) {
550 case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
551 case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
552 case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
553 case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
554 case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
555 case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
556 PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
559 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
560 PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);
562 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
563 PRE_XEN_SYSCTL_READ(cpupool_op, domid);
565 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
566 sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
567 PRE_XEN_SYSCTL_READ(cpupool_op, cpu);
569 break;
571 case VKI_XEN_SYSCTL_physinfo:
572 /* No input params */
573 break;
575 case VKI_XEN_SYSCTL_topologyinfo:
576 PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
577 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
578 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
579 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
580 break;
582 case VKI_XEN_SYSCTL_numainfo:
583 PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
584 PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
585 PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
586 PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
587 break;
589 default:
590 bad_subop(tid, layout, arrghs, status, flags,
591 "__HYPERVISOR_sysctl", sysctl->cmd);
592 break;
594 #undef PRE_XEN_SYSCTL_READ
595 #undef __PRE_XEN_SYSCTL_READ
598 PRE(domctl)
600 struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;
602 PRINT("__HYPERVISOR_domctl ( %d ) on dom%d", domctl->cmd, domctl->domain);
605 * Common part of xen_domctl:
606 * vki_uint32_t cmd;
607 * vki_uint32_t interface_version;
608 * vki_xen_domid_t domain;
610 PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
611 sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
612 + sizeof(vki_xen_domid_t));
614 if (!domctl)
615 return;
617 switch (domctl->interface_version)
619 case 0x00000007:
620 case 0x00000008:
621 case 0x00000009:
622 case 0x0000000a:
623 break;
624 default:
625 bad_intf_version(tid, layout, arrghs, status, flags,
626 "__HYPERVISOR_domctl", domctl->interface_version);
627 return;
630 #define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field) \
631 PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field, \
632 (Addr)&domctl->u._union._field, \
633 sizeof(domctl->u._union._field))
634 #define PRE_XEN_DOMCTL_READ(_domctl, _field) \
635 __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
637 switch (domctl->cmd) {
638 case VKI_XEN_DOMCTL_destroydomain:
639 case VKI_XEN_DOMCTL_pausedomain:
640 case VKI_XEN_DOMCTL_max_vcpus:
641 case VKI_XEN_DOMCTL_get_address_size:
642 case VKI_XEN_DOMCTL_gettscinfo:
643 case VKI_XEN_DOMCTL_getdomaininfo:
644 case VKI_XEN_DOMCTL_unpausedomain:
645 case VKI_XEN_DOMCTL_resumedomain:
646 /* No input fields. */
647 break;
649 case VKI_XEN_DOMCTL_createdomain:
650 PRE_XEN_DOMCTL_READ(createdomain, ssidref);
651 PRE_XEN_DOMCTL_READ(createdomain, handle);
652 PRE_XEN_DOMCTL_READ(createdomain, flags);
653 break;
655 case VKI_XEN_DOMCTL_gethvmcontext:
656 /* Xen unconditionally reads the 'buffer' pointer */
657 __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
658 /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
659 * buffer is a request for the required size. */
660 if ( domctl->u.hvmcontext.buffer.p )
661 __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
662 break;
664 case VKI_XEN_DOMCTL_sethvmcontext:
665 __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
666 __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
667 PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
668 (Addr)domctl->u.hvmcontext.buffer.p,
669 domctl->u.hvmcontext.size);
670 break;
672 case VKI_XEN_DOMCTL_gethvmcontext_partial:
673 __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
674 __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
675 __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);
677 switch (domctl->u.hvmcontext_partial.type) {
678 case VKI_HVM_SAVE_CODE(CPU):
679 if ( domctl->u.hvmcontext_partial.buffer.p )
680 PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
681 (Addr)domctl->u.hvmcontext_partial.buffer.p,
682 VKI_HVM_SAVE_LENGTH(CPU));
683 break;
684 default:
685 bad_subop(tid, layout, arrghs, status, flags,
686 "__HYPERVISOR_domctl_gethvmcontext_partial type",
687 domctl->u.hvmcontext_partial.type);
688 break;
690 break;
692 case VKI_XEN_DOMCTL_max_mem:
693 PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
694 break;
696 case VKI_XEN_DOMCTL_set_address_size:
697 __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
698 break;
700 case VKI_XEN_DOMCTL_test_assign_device:
701 __PRE_XEN_DOMCTL_READ(test_assign_device, assign_device, machine_sbdf);
702 break;
703 case VKI_XEN_DOMCTL_assign_device:
704 __PRE_XEN_DOMCTL_READ(assign_device, assign_device, machine_sbdf);
705 break;
706 case VKI_XEN_DOMCTL_deassign_device:
707 __PRE_XEN_DOMCTL_READ(deassign_device, assign_device, machine_sbdf);
708 break;
710 case VKI_XEN_DOMCTL_settscinfo:
711 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
712 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
713 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
714 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
715 break;
717 case VKI_XEN_DOMCTL_irq_permission:
718 PRE_XEN_DOMCTL_READ(irq_permission, pirq);
719 PRE_XEN_DOMCTL_READ(irq_permission, allow_access);
720 break;
722 case VKI_XEN_DOMCTL_iomem_permission:
723 PRE_XEN_DOMCTL_READ(iomem_permission, first_mfn);
724 PRE_XEN_DOMCTL_READ(iomem_permission, nr_mfns);
725 PRE_XEN_DOMCTL_READ(iomem_permission, allow_access);
726 break;
728 case VKI_XEN_DOMCTL_ioport_permission:
729 PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
730 PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
731 PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);
732 break;
734 case VKI_XEN_DOMCTL_hypercall_init:
735 PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
736 break;
738 case VKI_XEN_DOMCTL_settimeoffset:
739 PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
740 break;
742 case VKI_XEN_DOMCTL_getvcpuinfo:
743 PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
744 break;
746 case VKI_XEN_DOMCTL_scheduler_op:
747 PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
748 PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
749 if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
750 switch(domctl->u.scheduler_op.sched_id) {
751 case VKI_XEN_SCHEDULER_SEDF:
752 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
753 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
754 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
755 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
756 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
757 break;
758 case VKI_XEN_SCHEDULER_CREDIT:
759 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
760 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
761 break;
762 case VKI_XEN_SCHEDULER_CREDIT2:
763 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
764 break;
765 case VKI_XEN_SCHEDULER_RTDS:
766 PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.period);
767 PRE_XEN_DOMCTL_READ(scheduler_op, u.rtds.budget);
768 break;
769 case VKI_XEN_SCHEDULER_ARINC653:
770 break;
773 break;
775 case VKI_XEN_DOMCTL_getvcpuaffinity:
776 switch (domctl->interface_version) {
777 case 0x00000007:
778 case 0x00000008:
779 case 0x00000009:
780 __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, vcpu);
781 __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
782 break;
783 case 0x0000000a:
784 __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity_0000000a, vcpu);
785 if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
786 __PRE_XEN_DOMCTL_READ(
787 setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
788 if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
789 __PRE_XEN_DOMCTL_READ(
790 setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
791 break;
793 break;
795 case VKI_XEN_DOMCTL_setvcpuaffinity:
796 switch (domctl->interface_version) {
797 case 0x00000007:
798 case 0x00000008:
799 case 0x00000009:
800 __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, vcpu);
801 __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_00000009, cpumap.nr_bits);
802 PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
803 (Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
804 domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
805 break;
806 case 0x0000000a:
807 __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, vcpu);
808 __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity_0000000a, flags);
809 if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD) {
810 __PRE_XEN_DOMCTL_READ(
811 setvcpuaffinity, vcpuaffinity_0000000a, cpumap_hard.nr_bits);
812 PRE_MEM_READ(
813 "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_hard.bitmap",
814 (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
815 domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
817 if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT) {
818 __PRE_XEN_DOMCTL_READ(
819 setvcpuaffinity, vcpuaffinity_0000000a, cpumap_soft.nr_bits);
820 PRE_MEM_READ(
821 "XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap_soft.bitmap",
822 (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
823 domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
825 break;
827 break;
829 case VKI_XEN_DOMCTL_getnodeaffinity:
830 __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
831 break;
832 case VKI_XEN_DOMCTL_setnodeaffinity:
833 __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
834 PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
835 (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
836 domctl->u.nodeaffinity.nodemap.nr_bits / 8);
837 break;
839 case VKI_XEN_DOMCTL_getvcpucontext:
840 __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
841 break;
843 case VKI_XEN_DOMCTL_setvcpucontext:
844 __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
845 __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
846 break;
848 case VKI_XEN_DOMCTL_pin_mem_cacheattr:
849 PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, start);
850 PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, end);
851 PRE_XEN_DOMCTL_READ(pin_mem_cacheattr, type);
852 break;
854 case VKI_XEN_DOMCTL_get_ext_vcpucontext:
855 switch (domctl->interface_version)
857 case 0x00000007:
858 case 0x00000008:
859 __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);
860 break;
862 case 0x00000009:
863 __PRE_XEN_DOMCTL_READ(get_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
864 break;
866 default:
867 VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_get_ext_vcpucontext domctl version %#"
868 PRIx32" not implemented\n", domctl->interface_version);
869 SET_STATUS_Failure(VKI_EINVAL);
870 break;
872 break;
874 case VKI_XEN_DOMCTL_set_ext_vcpucontext:
875 switch (domctl->interface_version)
877 case 0x00000007:
878 case 0x00000008:
879 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, vcpu);
880 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008, size);
881 #if defined(__i386__) || defined(__x86_64__)
882 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
883 syscall32_callback_eip);
884 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
885 sysenter_callback_eip);
886 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
887 syscall32_callback_cs);
888 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
889 sysenter_callback_cs);
890 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
891 syscall32_disables_events);
892 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
893 sysenter_disables_events);
895 if ( domctl->u.ext_vcpucontext_00000008.size >=
896 offsetof(struct vki_xen_domctl_ext_vcpucontext_00000008, mcg_cap) )
897 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000008,
898 mcg_cap);
899 #endif
900 break;
902 case 0x00000009:
903 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, vcpu);
904 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009, size);
905 #if defined(__i386__) || defined(__x86_64__)
906 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
907 syscall32_callback_eip);
908 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
909 sysenter_callback_eip);
910 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
911 syscall32_callback_cs);
912 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
913 sysenter_callback_cs);
914 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
915 syscall32_disables_events);
916 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
917 sysenter_disables_events);
919 if ( domctl->u.ext_vcpucontext_00000009.size >=
920 offsetof(struct vki_xen_domctl_ext_vcpucontext_00000009, caps) )
922 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
923 caps);
924 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
925 mci_ctl2_bank0);
926 __PRE_XEN_DOMCTL_READ(set_ext_vcpucontext, ext_vcpucontext_00000009,
927 mci_ctl2_bank1);
929 #endif
930 break;
932 default:
933 VG_(dmsg)("WARNING: VKI_XEN_DOMCTL_set_ext_vcpucontext domctl version %#"
934 PRIx32" not implemented\n", domctl->interface_version);
935 SET_STATUS_Failure(VKI_EINVAL);
936 break;
938 break;
940 case VKI_XEN_DOMCTL_set_cpuid:
941 PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
942 (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
943 break;
945 case VKI_XEN_DOMCTL_getpageframeinfo3:
946 PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
947 PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
948 PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
949 (Addr)domctl->u.getpageframeinfo3.array.p,
950 domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
951 break;
953 case VKI_XEN_DOMCTL_setvcpuextstate:
954 __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, vcpu);
955 __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, size);
956 __PRE_XEN_DOMCTL_READ(setvcpuextstate, vcpuextstate, buffer);
957 PRE_MEM_READ("XEN_DOMCTL_setvcpuextstate *u.vcpuextstate.buffer.p",
958 (Addr)domctl->u.vcpuextstate.buffer.p,
959 domctl->u.vcpuextstate.size);
960 break;
962 case VKI_XEN_DOMCTL_getvcpuextstate:
963 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
964 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
965 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
966 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);
967 break;
969 case VKI_XEN_DOMCTL_shadow_op:
970 PRE_XEN_DOMCTL_READ(shadow_op, op);
972 switch(domctl->u.shadow_op.op)
974 case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
975 case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
976 /* No further inputs */
977 break;
979 case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
980 PRE_XEN_DOMCTL_READ(shadow_op, mode);
981 switch(domctl->u.shadow_op.mode)
983 case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
984 goto domctl_shadow_op_enable_logdirty;
987 default:
988 bad_subop(tid, layout, arrghs, status, flags,
989 "__HYPERVISOR_domctl shadowop mode",
990 domctl->u.shadow_op.mode);
991 break;
994 case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
995 domctl_shadow_op_enable_logdirty:
996 /* No further inputs */
997 break;
999 case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
1000 case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
1001 PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
1002 PRE_XEN_DOMCTL_READ(shadow_op, pages);
1003 break;
1005 case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
1006 PRE_XEN_DOMCTL_READ(shadow_op, mb);
1007 break;
1009 default:
1010 bad_subop(tid, layout, arrghs, status, flags,
1011 "__HYPERVISOR_domctl shadow(10)",
1012 domctl->u.shadow_op.op);
1013 break;
1015 break;
1017 case VKI_XEN_DOMCTL_set_max_evtchn:
1018 PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);
1019 break;
1021 case VKI_XEN_DOMCTL_cacheflush:
1022 PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
1023 PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
1024 break;
1026 case VKI_XEN_DOMCTL_set_access_required:
1027 PRE_XEN_DOMCTL_READ(access_required, access_required);
1028 break;
1030 case VKI_XEN_DOMCTL_mem_event_op:
1031 PRE_XEN_DOMCTL_READ(mem_event_op, op);
1032 PRE_XEN_DOMCTL_READ(mem_event_op, mode);
1033 break;
1035 case VKI_XEN_DOMCTL_debug_op:
1036 PRE_XEN_DOMCTL_READ(debug_op, op);
1037 PRE_XEN_DOMCTL_READ(debug_op, vcpu);
1038 break;
1040 case VKI_XEN_DOMCTL_get_vcpu_msrs:
1041 __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, vcpu);
1042 __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msr_count);
1043 __PRE_XEN_DOMCTL_READ(get_vcpu_msrs, vcpu_msrs, msrs);
1044 break;
1046 case VKI_XEN_DOMCTL_set_vcpu_msrs:
1047 __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, vcpu);
1048 __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msr_count);
1049 __PRE_XEN_DOMCTL_READ(set_vcpu_msrs, vcpu_msrs, msrs);
1050 PRE_MEM_READ("XEN_DOMCTL_set_vcpu_msrs *u.vcpu_msrs.msrs.p",
1051 (Addr)domctl->u.vcpu_msrs.msrs.p,
1052 sizeof(vki_xen_domctl_vcpu_msr_t) *
1053 domctl->u.vcpu_msrs.msr_count);
1054 break;
1056 default:
1057 bad_subop(tid, layout, arrghs, status, flags,
1058 "__HYPERVISOR_domctl", domctl->cmd);
1059 break;
1061 #undef PRE_XEN_DOMCTL_READ
1062 #undef __PRE_XEN_DOMCTL_READ
/* PRE-handler for __HYPERVISOR_hvm_op.  ARG1 is the HVM sub-op,
   ARG2 points at the sub-op's argument structure.  For each sub-op,
   check that the guest-supplied input fields are addressable and
   defined before the hypercall runs. */
PRE(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);

   /* Check one input field of the sub-op argument struct. */
#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
/* Convenience form for sub-ops whose argument type follows the
   vki_xen_hvm_<subop>_t naming convention. */
#define PRE_XEN_HVMOP_READ(_hvm_op, _field)                             \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_param:
      /* 'value' is an output here, so only domid/index are checked;
         the POST handler marks 'value' as written. */
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
      break;

   case VKI_XEN_HVMOP_set_pci_intx_level:
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domid);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, domain);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, bus);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, device);
      PRE_XEN_HVMOP_READ(set_pci_intx_level, level);
      break;

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);
      break;

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);
      break;

   case VKI_XEN_HVMOP_track_dirty_vram: {
      vki_xen_hvm_track_dirty_vram_t *Arg =
         (vki_xen_hvm_track_dirty_vram_t*)ARG2;
      PRE_XEN_HVMOP_READ(track_dirty_vram, domid);
      PRE_XEN_HVMOP_READ(track_dirty_vram, nr);
      /* first_pfn/dirty_bitmap are only consulted when nr != 0. */
      if ( Arg->nr ) {
         PRE_XEN_HVMOP_READ(track_dirty_vram, first_pfn);
         PRE_XEN_HVMOP_READ(track_dirty_vram, dirty_bitmap);
      }
      break;
   }

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
      break;

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* 'nr' is only an input when this is not a default-access
         request (first_pfn == ~0 signals "set default"). */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      /* Output field: check it is addressable before the call. */
      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));
      break;

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);
      break;
   }
#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
}
/* PRE-handler for __HYPERVISOR_tmem_op.  ARG1 points at a
   vki_xen_tmem_op; which union fields are inputs depends on the
   command and (for TMEM_control) the sub-op. */
PRE(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   PRINT("__HYPERVISOR_tmem_op ( %d )", tmem->cmd);

   /* Common part for xen_tmem_op:
    * vki_uint32_t cmd;
    */
   PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));

   /* Check one input field of the tmem_op payload union. */
#define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field)                    \
   PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field,        \
                (Addr)&tmem->u._union._field,                           \
                sizeof(tmem->u._union._field))
#define PRE_XEN_TMEMOP_READ(_tmem, _field)              \
   __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)

   switch(tmem->cmd) {

   case VKI_XEN_TMEM_control:

      /* Common part for control hypercall:
       * vki_int32_t pool_id;
       * vki_uint32_t subop;
       */
      PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
                   (Addr)&tmem->pool_id, sizeof(tmem->pool_id));
      PRE_XEN_TMEMOP_READ(ctrl, subop);

      switch (tmem->u.ctrl.subop) {

      case VKI_XEN_TMEMC_save_begin:
         PRE_XEN_TMEMOP_READ(ctrl, cli_id);
         PRE_XEN_TMEMOP_READ(ctrl, arg1);
         PRE_XEN_TMEMOP_READ(ctrl, buf);
         break;

      default:
         bad_subop(tid, layout, arrghs, status, flags,
                   "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);
      }

      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_tmem_op", ARG1);
   }

#undef PRE_XEN_TMEMOP_READ
#undef __PRE_XEN_TMEMOP_READ
}
1222 POST(memory_op)
1224 switch (ARG1) {
1225 case VKI_XENMEM_maximum_ram_page:
1226 case VKI_XENMEM_set_memory_map:
1227 case VKI_XENMEM_decrease_reservation:
1228 case VKI_XENMEM_claim_pages:
1229 case VKI_XENMEM_maximum_gpfn:
1230 case VKI_XENMEM_remove_from_physmap:
1231 case VKI_XENMEM_access_op:
1232 /* No outputs */
1233 break;
1234 case VKI_XENMEM_increase_reservation:
1235 case VKI_XENMEM_populate_physmap: {
1236 struct xen_memory_reservation *memory_reservation =
1237 (struct xen_memory_reservation *)ARG2;
1239 POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
1240 sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
1241 break;
1244 case VKI_XENMEM_machphys_mfn_list:
1245 case VKI_XENMEM_machphys_compat_mfn_list: {
1246 struct vki_xen_machphys_mfn_list *arg =
1247 (struct vki_xen_machphys_mfn_list *)ARG2;
1248 POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
1249 POST_MEM_WRITE((Addr)arg->extent_start.p,
1250 sizeof(vki_xen_pfn_t) * arg->nr_extents);
1251 break;
1254 case VKI_XENMEM_memory_map:
1255 case VKI_XENMEM_machine_memory_map: {
1256 struct vki_xen_memory_map *arg =
1257 (struct vki_xen_memory_map *)ARG2;
1258 POST_MEM_WRITE(arg->nr_entries, sizeof(arg->nr_entries));
1259 POST_MEM_WRITE((Addr)arg->buffer.p,
1260 arg->nr_entries * 20 /* size of an e820 entry */);
1261 break;
1264 case VKI_XENMEM_add_to_physmap: {
1265 struct vki_xen_add_to_physmap *arg =
1266 (struct vki_xen_add_to_physmap *)ARG2;
1267 if (arg->space == VKI_XENMAPSPACE_gmfn_range)
1268 POST_MEM_WRITE(ARG2, sizeof(*arg));
1271 case VKI_XENMEM_get_sharing_freed_pages:
1272 case VKI_XENMEM_get_sharing_shared_pages:
1273 /* No outputs */
1274 break;
1278 POST(mmuext_op)
1280 unsigned int *pdone = (unsigned int *)ARG3;
1281 /* simplistic */
1282 POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
1285 static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
1287 switch (cmd) {
1288 case VKI_XEN_EVTCHNOP_alloc_unbound: {
1289 struct vki_xen_evtchn_alloc_unbound *alloc_unbound = arg;
1290 POST_MEM_WRITE((Addr)&alloc_unbound->port, sizeof(alloc_unbound->port));
1291 break;
/* Post-handler for __HYPERVISOR_sched_op: none of the sub-ops
   handled so far produce outputs that need marking, so the switch
   body is intentionally empty. */
POST(sched_op)
{
   switch (ARG1) {
   }
}
/* Post-handler for __HYPERVISOR_event_channel_op: ARG1 is the
   sub-op, ARG2 points at its argument struct (non-compat layout). */
POST(evtchn_op)
{
   post_evtchn_op(tid, ARG1, (void *)ARG2, 0);
}
/* Post-handler for the legacy __HYPERVISOR_event_channel_op_compat:
   the sub-op code and its argument are bundled into a single struct
   pointed to by ARG1. */
POST(evtchn_op_compat)
{
   struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
   post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
}
/* Post-handler for __HYPERVISOR_xen_version: each sub-op writes a
   fixed-size result into the buffer at ARG2; mark it defined. */
POST(xen_version)
{
   switch (ARG1) {
   case VKI_XENVER_version:
      /* No outputs */
      break;
   case VKI_XENVER_extraversion:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
      break;
   case VKI_XENVER_compile_info:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
      break;
   case VKI_XENVER_capabilities:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
      break;
   case VKI_XENVER_changeset:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
      break;
   case VKI_XENVER_platform_parameters:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
      break;
   case VKI_XENVER_get_features:
      POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
      break;
   case VKI_XENVER_pagesize:
      /* No outputs */
      break;
   case VKI_XENVER_guest_handle:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
      break;
   case VKI_XENVER_commandline:
      POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
      break;
   }
}
1349 POST(grant_table_op)
1351 switch (ARG1) {
1352 case VKI_XEN_GNTTABOP_setup_table: {
1353 struct vki_xen_gnttab_setup_table *gst =
1354 (struct vki_xen_gnttab_setup_table*)ARG2;
1355 PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
1356 (Addr)&gst->status, sizeof(gst->status));
1357 PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
1358 (Addr)gst->frame_list.p,
1359 sizeof(*gst->frame_list.p) & gst->nr_frames);
1360 break;
/* Post-handler for __HYPERVISOR_sysctl: mark the output fields that
   Xen wrote back into the guest's sysctl structure as defined.
   Which fields (and which union layout) apply depends on the sysctl
   command and the interface version. */
POST(sysctl)
{
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   /* Only interface versions whose struct layouts are known here;
      anything else is left untouched. */
   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
   case 0x0000000b:
      break;
   default:
      return;
   }

   /* Mark one field of the sysctl payload union as written. */
#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&sysctl->u._union._field,               \
                  sizeof(sysctl->u._union._field))
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
   __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
                     sysctl->u.readconsole.count * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      /* The payload layout differs per interface version. */
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
                        * sysctl->u.getdomaininfolist_00000008.num_domains);
         break;
      case 0x00000009:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
                        * sysctl->u.getdomaininfolist_00000009.num_domains);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_sched_id:
      POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
      break;

   case VKI_XEN_SYSCTL_cpupool_op:
      /* Outputs depend on the cpupool sub-op. */
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
         POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
         POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
      }
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
      break;

   case VKI_XEN_SYSCTL_physinfo:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
      case 0x00000009: /* Unchanged from version 8 */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         /* NOTE(review): 'hw_cap[8]' marks only the element one past
            the array's last valid index; possibly the whole hw_cap
            array was intended -- confirm against the vki struct. */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
         break;
      case 0x0000000a:
      case 0x0000000b:
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      /* NOTE(review): sizes below use max_cpu_index without +1; if
         the index is inclusive these may be one element short --
         confirm against the Xen interface. */
      POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_core.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_socket.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_node.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
                        sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
      break;

   /* No outputs */
   case VKI_XEN_SYSCTL_debug_keys:
      break;
   }
#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
}
/* Post-handler for __HYPERVISOR_domctl: mark the fields Xen wrote
   back into the guest's domctl argument as defined.  Which fields
   (and which union layout) apply depends on both the domctl command
   and the interface version. */
POST(domctl){
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   /* Only interface versions whose struct layouts are known here. */
   switch (domctl->interface_version) {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      return;
   }

   /* Mark one field of the domctl payload union as written. */
#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,               \
                  sizeof(domctl->u._union._field));
#define POST_XEN_DOMCTL_WRITE(_domctl, _field)          \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_setvcpuextstate:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_test_assign_device:
   case VKI_XEN_DOMCTL_assign_device:
   case VKI_XEN_DOMCTL_deassign_device:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_irq_permission:
   case VKI_XEN_DOMCTL_iomem_permission:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_pin_mem_cacheattr:
   case VKI_XEN_DOMCTL_set_ext_vcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_vcpu_msrs:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */
      break;

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
      break;

   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_gettscinfo:
      /* Note: the first macro argument is documentary only; the
         get and set variants share the tsc_info union member. */
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally writes size... */
      __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
      /* ...but only writes to the buffer if it was non NULL */
      if ( domctl->u.hvmcontext.buffer.p )
         POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                        sizeof(*domctl->u.hvmcontext.buffer.p)
                        * domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
                           VKI_HVM_SAVE_LENGTH(CPU));
         break;
      }
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      /* Only the getinfo direction produces outputs; which union
         member was filled depends on the scheduler in use. */
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         case VKI_XEN_SCHEDULER_RTDS:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.rtds.budget);
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpuaffinity: /* Writes back actual result */
      switch (domctl->interface_version) {
      case 0x00000007:
      case 0x00000008:
      case 0x00000009:
         POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity_00000009.cpumap.bitmap.p,
                        domctl->u.vcpuaffinity_00000009.cpumap.nr_bits / 8);
         break;
      case 0x0000000a:
         /* Version 0xa split the map into hard and soft affinity,
            selected by flags. */
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_HARD)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_hard.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_hard.nr_bits / 8);
         if (domctl->u.vcpuaffinity_0000000a.flags & VKI_XEN_VCPUAFFINITY_SOFT)
            POST_MEM_WRITE(
               (Addr)domctl->u.vcpuaffinity_0000000a.cpumap_soft.bitmap.p,
               domctl->u.vcpuaffinity_0000000a.cpumap_soft.nr_bits / 8);
      }
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getdomaininfo:
      /* The getdomaininfo layout changed across versions. */
      switch (domctl->interface_version) {
      case 0x00000007:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
         break;
      case 0x00000008:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
         break;
      case 0x00000009:
      case 0x0000000a:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                     domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;

   case VKI_XEN_DOMCTL_get_ext_vcpucontext:
      switch (domctl->interface_version)
      {
      case 0x00000007:
      case 0x00000008:
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008, size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 sysenter_disables_events);

         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000008,
                                 mcg_cap);
#endif
         break;

      case 0x00000009:
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009, size);
#if defined(__i386__) || defined(__x86_64__)
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_eip);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_callback_cs);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 syscall32_disables_events);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 sysenter_disables_events);

         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 caps);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 mci_ctl2_bank0);
         __POST_XEN_DOMCTL_WRITE(get_ext_vcpucontext, ext_vcpucontext_00000009,
                                 mci_ctl2_bank1);
#endif
         break;
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuextstate:
      if (domctl->u.vcpuextstate.buffer.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                        domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
      switch(domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
      case VKI_XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
         /* No outputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         POST_XEN_DOMCTL_WRITE(shadow_op, pages);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
         if(domctl->u.shadow_op.dirty_bitmap.p)
            POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                           domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
         POST_XEN_DOMCTL_WRITE(shadow_op, mb);
         break;

      default:
         break;
      }
      break;
   case VKI_XEN_DOMCTL_get_vcpu_msrs:
      if (domctl->u.vcpu_msrs.msrs.p)
         POST_MEM_WRITE((Addr)domctl->u.vcpu_msrs.msrs.p,
                        sizeof(vki_xen_domctl_vcpu_msr_t) *
                        domctl->u.vcpu_msrs.msr_count);
      break;

   case VKI_XEN_DOMCTL_mem_event_op:
      POST_XEN_DOMCTL_WRITE(mem_event_op, port);

      break;
   }
#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
}
/* Post-handler for __HYPERVISOR_hvm_op: mark each sub-op's output
   fields as defined. */
POST(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   /* Mark one field of the sub-op argument struct as written. */
#define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field)  \
   POST_MEM_WRITE((Addr)&((_type*)arg)->_field,         \
                  sizeof(((_type*)arg)->_field))
#define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
   __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
   case VKI_XEN_HVMOP_set_pci_intx_level:
   case VKI_XEN_HVMOP_set_isa_irq_level:
   case VKI_XEN_HVMOP_set_pci_link_route:
   case VKI_XEN_HVMOP_set_mem_type:
   case VKI_XEN_HVMOP_set_mem_access:
   case VKI_XEN_HVMOP_inject_trap:
      /* No output parameters */
      break;

   case VKI_XEN_HVMOP_get_param:
      __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
      break;
   }
#undef __POST_XEN_HVMOP_WRITE
#undef POST_XEN_HVMOP_WRITE
}
/* Post-handler for __HYPERVISOR_tmem_op: the only handled sub-op
   (TMEMC_save_begin) produces no outputs, so nothing is marked. */
POST(tmem_op)
{
   struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;

   switch(tmem->cmd) {

   case VKI_XEN_TMEM_control:

      switch(tmem->u.ctrl.subop) {
      /* No outputs */
      case VKI_XEN_TMEMC_save_begin:
         break;
      }

      break;
   }
}
/* One slot of the hypercall dispatch table: the PRE/POST wrapper
   pair plus the number of hypercall arguments the wrapper consumes
   (reported back to the dispatcher via ARG8). */
typedef
   struct {
      SyscallTableEntry entry;  /* before/after wrapper functions */
      int nr_args;              /* hypercall arguments consumed */
   }
   XenHypercallTableEntry;
/* Table entry for a hypercall with only a PRE wrapper. */
#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
/* Table entry for a hypercall with both PRE and POST wrappers. */
#define HYPXY(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, \
                 vgSysWrap_xen_##name##_after }, \
               nr_args }

/* Hypercall dispatch table, indexed by hypercall number.  The
   commented-out entries have no wrapper yet and fall back to the
   catch-all handler via ML_(get_xen_hypercall_entry). */
static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10
   //                                                              // 11
   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret, iret                                      // 23
   //    __VKI_XEN_vcpu_op, vcpu_op                                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   //    __VKI_XEN_xsm_op                                          // 27
   //    __VKI_XEN_nmi_op                                          // 28
   HYPXY(__VKI_XEN_sched_op,                sched_op,          2), // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   //    __VKI_XEN_physdev_op                                      // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};
/* Catch-all PRE handler for hypercalls with no registered wrapper:
   print a warning (plus a stack trace at higher verbosity) and fail
   the hypercall with ENOSYS so the guest sees a clean error. */
static void bad_before ( ThreadId tid,
                         SyscallArgLayout* layout,
                         /*MOD*/SyscallArgs* args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord* flags )
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
             VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
/* Fallback dispatch entry used for any hypercall number that has no
   wrapper in hypercall_table: warn-and-fail PRE, no POST, zero args. */
static XenHypercallTableEntry bad_hyper =
{ { bad_before, NULL }, 0 };
1946 static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
1948 XenHypercallTableEntry *ret = &bad_hyper;
1950 const UInt hypercall_table_size
1951 = sizeof(hypercall_table) / sizeof(hypercall_table[0]);
1953 /* Is it in the contiguous initial section of the table? */
1954 if (sysno < hypercall_table_size) {
1955 XenHypercallTableEntry* ent = &hypercall_table[sysno];
1956 if (ent->entry.before != NULL)
1957 ret = ent;
1960 /* Can't find a wrapper */
1961 return ret;
1964 DEFN_PRE_TEMPLATE(xen, hypercall)
1966 XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);
1968 /* Return number of arguments consumed */
1969 ARG8 = ent->nr_args;
1971 vg_assert(ent);
1972 vg_assert(ent->entry.before);
1973 (ent->entry.before)( tid, layout, arrghs, status, flags );
1977 DEFN_POST_TEMPLATE(xen, hypercall)
1979 XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);
1981 /* Return number of arguments consumed */
1982 ARG8 = ent->nr_args;
1984 vg_assert(ent);
1985 if (ent->entry.after)
1986 (ent->entry.after)( tid, arrghs, status );
1989 #endif // defined(ENABLE_XEN)