/******************************************************************************
 * hypercall.h
 *
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <trace/events/xen.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
#include <xen/interface/platform.h>
#include <xen/interface/xen-mca.h>

struct xen_dm_op_buf;
/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part.  Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm.  There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language.  Sorry.  (But let's just give thanks
 * there aren't more than 5 arguments...)
 */
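/*
 * As a rough illustration only (not literal preprocessor output), a
 * two-argument wrapper such as _hypercall2(int, sched_op, cmd, arg)
 * built from the macros below amounts, on x86-64, to roughly:
 *
 *	register unsigned long __res  asm("rax");
 *	register unsigned long __arg1 asm("rdi") = (unsigned long)(cmd);
 *	register unsigned long __arg2 asm("rsi") = (unsigned long)(arg);
 *	asm volatile("call hypercall_page+%c[offset]"
 *		     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
 *		     : [offset] "i" (__HYPERVISOR_sched_op * 32)
 *		     : "memory", "r8", "r10", "rdx");
 *	(int)__res;
 *
 * i.e. the arguments land in the ABI registers, only the ones actually
 * used are tied up as "+r" operands, and every remaining argument
 * register plus memory is listed as clobbered, as described above.
 */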
extern struct { char _entry[32]; } hypercall_page[];
#define __HYPERCALL		"call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x)						\
	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG	"eax"
#define __HYPERCALL_ARG1REG	"ebx"
#define __HYPERCALL_ARG2REG	"ecx"
#define __HYPERCALL_ARG3REG	"edx"
#define __HYPERCALL_ARG4REG	"esi"
#define __HYPERCALL_ARG5REG	"edi"
#else
#define __HYPERCALL_RETREG	"rax"
#define __HYPERCALL_ARG1REG	"rdi"
#define __HYPERCALL_ARG2REG	"rsi"
#define __HYPERCALL_ARG3REG	"rdx"
#define __HYPERCALL_ARG4REG	"r10"
#define __HYPERCALL_ARG5REG	"r8"
#endif
#define __HYPERCALL_DECLS						\
	register unsigned long __res  asm(__HYPERCALL_RETREG);		\
	register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
#define __HYPERCALL_0PARAM	"=r" (__res), ASM_CALL_CONSTRAINT
#define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM	__HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM	__HYPERCALL_4PARAM, "+r" (__arg5)
#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1)						\
	__HYPERCALL_0ARG()		__arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2)						\
	__HYPERCALL_1ARG(a1)		__arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3)					\
	__HYPERCALL_2ARG(a1,a2)		__arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4)					\
	__HYPERCALL_3ARG(a1,a2,a3)	__arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5)				\
	__HYPERCALL_4ARG(a1,a2,a3,a4)	__arg5 = (unsigned long)(a5);
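/*
 * __HYPERCALL_CLOBBERn lists "memory" plus every argument register above
 * n, i.e. the ones not already tied up as "+r" operands by the matching
 * __HYPERCALL_nPARAM, so the compiler treats them as trashed by the call.
 */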
#define __HYPERCALL_CLOBBER5	"memory"
#define __HYPERCALL_CLOBBER4	__HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3	__HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2	__HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1	__HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0	__HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
#define _hypercall0(type, name)						\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_0ARG();						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_0PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER0);				\
	(type)__res;							\
})

#define _hypercall1(type, name, a1)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_1ARG(a1);						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_1PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER1);				\
	(type)__res;							\
})

#define _hypercall2(type, name, a1, a2)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_2ARG(a1, a2);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_2PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER2);				\
	(type)__res;							\
})

#define _hypercall3(type, name, a1, a2, a3)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_3ARG(a1, a2, a3);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_3PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER3);				\
	(type)__res;							\
})

#define _hypercall4(type, name, a1, a2, a3, a4)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_4ARG(a1, a2, a3, a4);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_4PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER4);				\
	(type)__res;							\
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5)			\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_5PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER5);				\
	(type)__res;							\
})
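/*
 * Raw hypercall, indexed into the hypercall page at run time; used by the
 * privcmd driver to forward arbitrary hypercalls from the toolstack.
 * CALL_NOSPEC keeps the indirect call retpoline-safe.
 */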
static inline long
privcmd_call(unsigned call,
	     unsigned long a1, unsigned long a2,
	     unsigned long a3, unsigned long a4,
	     unsigned long a5)
{
	__HYPERCALL_DECLS;
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

	stac();
	asm volatile(CALL_NOSPEC
		     : __HYPERCALL_5PARAM
		     : [thunk_target] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);
	clac();

	return (long)__res;
}
static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
	return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
		      int *success_count, domid_t domid)
{
	return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
		     int *success_count, domid_t domid)
{
	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
	return _hypercall2(int, stack_switch, ss, esp);
}
#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
			 unsigned long event_address,
			 unsigned long failsafe_selector,
			 unsigned long failsafe_address)
{
	return _hypercall4(int, set_callbacks,
			   event_selector, event_address,
			   failsafe_selector, failsafe_address);
}
#else  /* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
			 unsigned long failsafe_address,
			 unsigned long syscall_address)
{
	return _hypercall3(int, set_callbacks,
			   event_address, failsafe_address,
			   syscall_address);
}
#endif  /* CONFIG_X86_{32,64} */
static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
	return _hypercall1(int, fpu_taskswitch, set);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}
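/*
 * The 64-bit timeout is passed as two longs so that the same wrapper works
 * on 32-bit, where the value is split across two argument registers; on
 * 64-bit the low word already carries the whole value and the extra
 * argument is harmless.
 */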
static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout>>32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}
static inline int
HYPERVISOR_mca(struct xen_mc *mc_op)
{
	mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
	return _hypercall1(int, mca, mc_op);
}

static inline int
HYPERVISOR_platform_op(struct xen_platform_op *op)
{
	op->interface_version = XENPF_INTERFACE_VERSION;
	return _hypercall1(int, platform_op, op);
}

static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	return _hypercall1(unsigned long, get_debugreg, reg);
}
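/*
 * Several of the wrappers below compare sizeof(...) against sizeof(long):
 * on 64-bit a pte or machine address fits in a single argument, while on
 * 32-bit it has to be split into low/high halves.  The comparison is a
 * compile-time constant, so only one of the two calls survives in the
 * generated code.
 */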
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
	if (sizeof(u64) == sizeof(long))
		return _hypercall2(int, update_descriptor, ma, desc);
	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}

static inline long
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
	return _hypercall2(long, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
{
	return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			     unsigned long flags)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall3(int, update_va_mapping, va,
				   new_val.pte, flags);
	else
		return _hypercall4(int, update_va_mapping, va,
				   new_val.pte, new_val.pte >> 32, flags);
}
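/*
 * Hypervisors predating the two-argument event channel and physdev
 * interfaces return -ENOSYS for them; the *_compat helpers then retry
 * through the legacy single-structure hypercalls.
 */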
extern int __must_check xen_event_channel_op_compat(int, void *);

static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, event_channel_op, cmd, arg);
	if (unlikely(rc == -ENOSYS))
		rc = xen_event_channel_op_compat(cmd, arg);
	return rc;
}
static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return _hypercall3(int, console_io, cmd, count, str);
}

extern int __must_check xen_physdev_op_compat(int, void *);

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, physdev_op, cmd, arg);
	if (unlikely(rc == -ENOSYS))
		rc = xen_physdev_op_compat(cmd, arg);
	return rc;
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	return _hypercall3(int, grant_table_op, cmd, uop, count);
}
static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
					 unsigned long flags, domid_t domid)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall4(int, update_va_mapping_otherdomain, va,
				   new_val.pte, flags, domid);
	else
		return _hypercall5(int, update_va_mapping_otherdomain, va,
				   new_val.pte, new_val.pte >> 32,
				   flags, domid);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
	return _hypercall2(int, set_segment_base, reg, value);
}
#endif
static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

	/*
	 * For a PV guest the tools require that the start_info mfn be
	 * present in rdx/edx when the hypercall is made. Per the
	 * hypercall calling convention this is the third hypercall
	 * argument, which is start_info_mfn here.
	 */
	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
	return _hypercall2(int, nmi_op, op, arg);
}
static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
	return _hypercall2(unsigned long, hvm_op, op, arg);
}
static inline int
HYPERVISOR_tmem_op(struct tmem_op *op)
{
	return _hypercall1(int, tmem_op, op);
}
static inline int
HYPERVISOR_xenpmu_op(unsigned int op, void *arg)
{
	return _hypercall2(int, xenpmu_op, op, arg);
}
static inline int
HYPERVISOR_dm_op(
	domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
{
	int ret;

	stac();
	ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
	clac();

	return ret;
}
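/*
 * The MULTI_* helpers below fill in one struct multicall_entry for later
 * batched submission via the multicall hypercall; each one also emits a
 * tracepoint recording how many argument slots are meaningful.
 */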
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
	mcl->op = __HYPERVISOR_fpu_taskswitch;
	mcl->args[0] = set;

	trace_xen_mc_entry(mcl, 1);
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			pte_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}
static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
		     void *uop, unsigned int count)
{
	mcl->op = __HYPERVISOR_grant_table_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)uop;
	mcl->args[2] = count;

	trace_xen_mc_entry(mcl, 3);
}

static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
				    pte_t new_val, unsigned long flags,
				    domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
		mcl->args[3] = domid;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
		mcl->args[4] = domid;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}
static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
{
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
		mcl->args[1] = *(unsigned long *)&desc;
	} else {
		u32 *p = (u32 *)&desc;

		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
		mcl->args[2] = *p++;
		mcl->args[3] = *p;
	}

	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}

static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
	mcl->op = __HYPERVISOR_memory_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)arg;

	trace_xen_mc_entry(mcl, 2);
}
static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
		 int count, int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)req;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
		int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmuext_op;
	mcl->args[0] = (unsigned long)op;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}
static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
	mcl->op = __HYPERVISOR_set_gdt;
	mcl->args[0] = (unsigned long)frames;
	mcl->args[1] = entries;

	trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
		   unsigned long ss, unsigned long esp)
{
	mcl->op = __HYPERVISOR_stack_switch;
	mcl->args[0] = ss;
	mcl->args[1] = esp;

	trace_xen_mc_entry(mcl, 2);
}
#endif /* _ASM_X86_XEN_HYPERCALL_H */