/******************************************************************************
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>

/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part.  Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm.  There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language.  Sorry.  (But let's just give thanks
 * there aren't more than 5 arguments...)
 */
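
/*
 * Each hypercall gets a 32-byte stub in the hypercall page, so the stub for
 * hypercall number N lives at hypercall_page + N * 32.  The page itself is
 * populated by the hypervisor when the guest boots.
 */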
extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL		"call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x)						\
	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))

#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG	"eax"
#define __HYPERCALL_ARG1REG	"ebx"
#define __HYPERCALL_ARG2REG	"ecx"
#define __HYPERCALL_ARG3REG	"edx"
#define __HYPERCALL_ARG4REG	"esi"
#define __HYPERCALL_ARG5REG	"edi"
#else
#define __HYPERCALL_RETREG	"rax"
#define __HYPERCALL_ARG1REG	"rdi"
#define __HYPERCALL_ARG2REG	"rsi"
#define __HYPERCALL_ARG3REG	"rdx"
#define __HYPERCALL_ARG4REG	"r10"
#define __HYPERCALL_ARG5REG	"r8"
#endif
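
/*
 * Note that the __argN register variables below are initialized with
 * themselves: their values are meaningless until the relevant
 * __HYPERCALL_nARG() macro assigns them, and the self-assignment keeps gcc
 * from warning about uninitialized use of the arguments a given hypercall
 * doesn't actually take.
 */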
#define __HYPERCALL_DECLS						\
	register unsigned long __res  asm(__HYPERCALL_RETREG);		\
	register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;

#define __HYPERCALL_0PARAM	"=r" (__res)
#define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM	__HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM	__HYPERCALL_4PARAM, "+r" (__arg5)

#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1)						\
	__HYPERCALL_0ARG()		__arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2)						\
	__HYPERCALL_1ARG(a1)		__arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3)					\
	__HYPERCALL_2ARG(a1,a2)		__arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4)					\
	__HYPERCALL_3ARG(a1,a2,a3)	__arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5)				\
	__HYPERCALL_4ARG(a1,a2,a3,a4)	__arg5 = (unsigned long)(a5);

#define __HYPERCALL_CLOBBER5	"memory"
#define __HYPERCALL_CLOBBER4	__HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3	__HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2	__HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1	__HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0	__HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
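
/*
 * Putting the pieces together, as a rough sketch (not the literal cpp
 * expansion): _hypercall2(int, sched_op, cmd, arg) on x86_64 boils down to
 *
 *	register unsigned long __res  asm("rax");
 *	register unsigned long __arg1 asm("rdi") = (unsigned long)(cmd);
 *	register unsigned long __arg2 asm("rsi") = (unsigned long)(arg);
 *	asm volatile("call hypercall_page+%c[offset]"
 *		     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
 *		     : [offset] "i" (__HYPERVISOR_sched_op * 32)
 *		     : "memory", "r8", "r10", "rdx");
 *	(int)__res;
 *
 * i.e. the argument registers that are actually used become in/out operands
 * and the unused ones become clobbers, so gcc assumes the hypervisor may
 * trash all of them.
 */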
#define _hypercall0(type, name)						\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_0ARG();						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_0PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER0);				\
	(type)__res;							\
})

#define _hypercall1(type, name, a1)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_1ARG(a1);						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_1PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER1);				\
	(type)__res;							\
})

#define _hypercall2(type, name, a1, a2)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_2ARG(a1, a2);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_2PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER2);				\
	(type)__res;							\
})

#define _hypercall3(type, name, a1, a2, a3)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_3ARG(a1, a2, a3);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_3PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER3);				\
	(type)__res;							\
})

#define _hypercall4(type, name, a1, a2, a3, a4)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_4ARG(a1, a2, a3, a4);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_4PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER4);				\
	(type)__res;							\
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5)			\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_5PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER5);				\
	(type)__res;							\
})
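
/*
 * Issue a hypercall by number rather than by name.  This is the path the
 * privcmd driver uses to forward hypercalls made on behalf of user space
 * (e.g. the Xen toolstack), where the hypercall number is not known at
 * compile time.
 */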
static inline long
privcmd_call(unsigned call,
	     unsigned long a1, unsigned long a2,
	     unsigned long a3, unsigned long a4,
	     unsigned long a5)
{
	__HYPERCALL_DECLS;
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

	asm volatile("call *%[call]"
		     : __HYPERCALL_5PARAM
		     : [call] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);

	return (long)__res;
}

static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
	return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
		      int *success_count, domid_t domid)
{
	return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
		     int *success_count, domid_t domid)
{
	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
	return _hypercall2(int, stack_switch, ss, esp);
}

#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
			 unsigned long event_address,
			 unsigned long failsafe_selector,
			 unsigned long failsafe_address)
{
	return _hypercall4(int, set_callbacks,
			   event_selector, event_address,
			   failsafe_selector, failsafe_address);
}
#else  /* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
			 unsigned long failsafe_address,
			 unsigned long syscall_address)
{
	return _hypercall3(int, set_callbacks,
			   event_address, failsafe_address,
			   syscall_address);
}
#endif  /* CONFIG_X86_{32,64} */

static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
	return _hypercall1(int, fpu_taskswitch, set);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}
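
/*
 * set_timer_op takes a 64-bit deadline.  It is passed as separate low and
 * high halves so the same code works on 32-bit, where a single argument
 * register cannot hold it; on 64-bit the low "half" already carries the
 * whole value.
 */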
static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout>>32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}

static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	return _hypercall1(unsigned long, get_debugreg, reg);
}
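
/*
 * sizeof(u64) == sizeof(long) is a compile-time constant, so only one of the
 * two branches below survives: on 64-bit each 64-bit quantity fits in one
 * argument, on 32-bit it is split into low/high halves.
 */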
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
	if (sizeof(u64) == sizeof(long))
		return _hypercall2(int, update_descriptor, ma, desc);
	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}

static inline int
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
	return _hypercall2(int, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, int nr_calls)
{
	return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			     unsigned long flags)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall3(int, update_va_mapping, va,
				   new_val.pte, flags);
	else
		return _hypercall4(int, update_va_mapping, va,
				   new_val.pte, new_val.pte >> 32, flags);
}
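
/*
 * Newer hypervisors take the event-channel sub-command directly as the first
 * argument.  If that fails with -ENOSYS we are running on an older hypervisor
 * that only knows the original single-structure interface, so repackage the
 * arguments into a struct evtchn_op and retry via event_channel_op_compat.
 */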
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, event_channel_op, cmd, arg);
	if (unlikely(rc == -ENOSYS)) {
		struct evtchn_op op;
		op.cmd = cmd;
		memcpy(&op.u, arg, sizeof(op.u));
		rc = _hypercall1(int, event_channel_op_compat, &op);
		memcpy(arg, &op.u, sizeof(op.u));
	}
	return rc;
}

static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return _hypercall3(int, console_io, cmd, count, str);
}
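
/* Same -ENOSYS compat fallback as event_channel_op, but for physdev_op. */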
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, physdev_op, cmd, arg);
	if (unlikely(rc == -ENOSYS)) {
		struct physdev_op op;
		op.cmd = cmd;
		memcpy(&op.u, arg, sizeof(op.u));
		rc = _hypercall1(int, physdev_op_compat, &op);
		memcpy(arg, &op.u, sizeof(op.u));
	}
	return rc;
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	return _hypercall3(int, grant_table_op, cmd, uop, count);
}

static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
					 unsigned long flags, domid_t domid)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall4(int, update_va_mapping_otherdomain, va,
				   new_val.pte, flags, domid);
	else
		return _hypercall5(int, update_va_mapping_otherdomain, va,
				   new_val.pte, new_val.pte >> 32,
				   flags, domid);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
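
/*
 * set_segment_base only exists on x86_64, where the hypervisor sets the FS
 * and GS base addresses on behalf of the PV guest.
 */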
#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
	return _hypercall2(int, set_segment_base, reg, value);
}
#endif

static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

	/*
	 * For a PV guest the tools require that the start_info mfn be
	 * present in rdx/edx when the hypercall is made. Per the
	 * hypercall calling convention this is the third hypercall
	 * argument, which is start_info_mfn here.
	 */
	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
	return _hypercall2(int, nmi_op, op, arg);
}

static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
	return _hypercall2(unsigned long, hvm_op, op, arg);
}

static inline int
HYPERVISOR_tmem_op(struct tmem_op *op)
{
	return _hypercall1(int, tmem_op, op);
}
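
/*
 * The MULTI_* helpers below don't issue a hypercall themselves; each one
 * fills in a struct multicall_entry so that a batch of operations can later
 * be submitted with a single HYPERVISOR_multicall(), which is much cheaper
 * than trapping into the hypervisor once per operation.
 */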
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
	mcl->op = __HYPERVISOR_fpu_taskswitch;
	mcl->args[0] = set;
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			pte_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
	}
}

static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
		     void *uop, unsigned int count)
{
	mcl->op = __HYPERVISOR_grant_table_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)uop;
	mcl->args[2] = count;
}

static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
				    pte_t new_val, unsigned long flags,
				    domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
		mcl->args[3] = domid;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
		mcl->args[4] = domid;
	}
}

static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
{
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
		mcl->args[1] = *(unsigned long *)&desc;
	} else {
		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
		mcl->args[2] = desc.a;
		mcl->args[3] = desc.b;
	}
}

static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
	mcl->op = __HYPERVISOR_memory_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)arg;
}

static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
		 int count, int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)req;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
		int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmuext_op;
	mcl->args[0] = (unsigned long)op;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;
}

static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
	mcl->op = __HYPERVISOR_set_gdt;
	mcl->args[0] = (unsigned long)frames;
	mcl->args[1] = entries;
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
		   unsigned long ss, unsigned long esp)
{
	mcl->op = __HYPERVISOR_stack_switch;
	mcl->args[0] = ss;
	mcl->args[1] = esp;
}

#endif /* _ASM_X86_XEN_HYPERCALL_H */