/******************************************************************************
 * hypercall.h
 *
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <trace/events/xen.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part.  Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm.  There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language.  Sorry.  (But let's just give thanks
 * there aren't more than 5 arguments...)
 */
extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL		"call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x)						\
	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
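/*
 * Each entry in hypercall_page is a 32-byte stub, so the [offset]
 * immediate above is simply the hypercall number scaled by
 * sizeof(hypercall_page[0]).  For example, __HYPERCALL_ENTRY(sched_op)
 * yields the byte offset of the sched_op stub within the page.
 */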
#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG	"eax"
#define __HYPERCALL_ARG1REG	"ebx"
#define __HYPERCALL_ARG2REG	"ecx"
#define __HYPERCALL_ARG3REG	"edx"
#define __HYPERCALL_ARG4REG	"esi"
#define __HYPERCALL_ARG5REG	"edi"
#else
#define __HYPERCALL_RETREG	"rax"
#define __HYPERCALL_ARG1REG	"rdi"
#define __HYPERCALL_ARG2REG	"rsi"
#define __HYPERCALL_ARG3REG	"rdx"
#define __HYPERCALL_ARG4REG	"r10"
#define __HYPERCALL_ARG5REG	"r8"
#endif

#define __HYPERCALL_DECLS						\
	register unsigned long __res  asm(__HYPERCALL_RETREG);		\
	register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;

#define __HYPERCALL_0PARAM	"=r" (__res)
#define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM	__HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM	__HYPERCALL_4PARAM, "+r" (__arg5)

#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1)						\
	__HYPERCALL_0ARG()		__arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2)						\
	__HYPERCALL_1ARG(a1)		__arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3)					\
	__HYPERCALL_2ARG(a1,a2)		__arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4)					\
	__HYPERCALL_3ARG(a1,a2,a3)	__arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5)				\
	__HYPERCALL_4ARG(a1,a2,a3,a4)	__arg5 = (unsigned long)(a5);

#define __HYPERCALL_CLOBBER5	"memory"
#define __HYPERCALL_CLOBBER4	__HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3	__HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2	__HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1	__HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0	__HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
#define _hypercall0(type, name)						\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_0ARG();						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_0PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER0);				\
	(type)__res;							\
})

#define _hypercall1(type, name, a1)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_1ARG(a1);						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_1PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER1);				\
	(type)__res;							\
})

#define _hypercall2(type, name, a1, a2)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_2ARG(a1, a2);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_2PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER2);				\
	(type)__res;							\
})

#define _hypercall3(type, name, a1, a2, a3)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_3ARG(a1, a2, a3);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_3PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER3);				\
	(type)__res;							\
})

#define _hypercall4(type, name, a1, a2, a3, a4)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_4ARG(a1, a2, a3, a4);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_4PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER4);				\
	(type)__res;							\
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5)			\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_5PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER5);				\
	(type)__res;							\
})
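/*
 * Rough sketch of what one of these macros expands to (not the literal
 * preprocessor output); e.g. on x86-64, _hypercall2(int, sched_op, cmd, arg)
 * becomes approximately:
 *
 *	({
 *		register unsigned long __res  asm("rax");
 *		register unsigned long __arg1 asm("rdi") = (unsigned long)(cmd);
 *		register unsigned long __arg2 asm("rsi") = (unsigned long)(arg);
 *		...
 *		asm volatile("call hypercall_page+%c[offset]"
 *			     : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
 *			     : [offset] "i" (__HYPERVISOR_sched_op *
 *					     sizeof(hypercall_page[0]))
 *			     : "memory", "r8", "r10", "rdx");
 *		(int)__res;
 *	})
 */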
static inline long
privcmd_call(unsigned call,
	     unsigned long a1, unsigned long a2,
	     unsigned long a3, unsigned long a4,
	     unsigned long a5)
{
	__HYPERCALL_DECLS;
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

	asm volatile("call *%[call]"
		     : __HYPERCALL_5PARAM
		     : [call] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);

	return (long)__res;
}
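/*
 * Unlike the fixed-offset __HYPERCALL stubs used by the wrappers below,
 * privcmd_call() takes the hypercall number at run time and calls the
 * corresponding hypercall_page entry indirectly, e.g. to forward
 * hypercalls issued from userspace via the privcmd driver.
 */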
static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
	return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
		      int *success_count, domid_t domid)
{
	return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
		     int *success_count, domid_t domid)
{
	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
	return _hypercall2(int, stack_switch, ss, esp);
}
#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
			 unsigned long event_address,
			 unsigned long failsafe_selector,
			 unsigned long failsafe_address)
{
	return _hypercall4(int, set_callbacks,
			   event_selector, event_address,
			   failsafe_selector, failsafe_address);
}
#else  /* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
			 unsigned long failsafe_address,
			 unsigned long syscall_address)
{
	return _hypercall3(int, set_callbacks,
			   event_address, failsafe_address,
			   syscall_address);
}
#endif	/* CONFIG_X86_{32,64} */
static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
	return _hypercall1(int, fpu_taskswitch, set);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}

static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout>>32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}
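/*
 * The 64-bit timeout is split into low/high halves above so the same
 * call works on 32-bit, where a single argument register cannot hold
 * the whole value.
 */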
static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	return _hypercall1(unsigned long, get_debugreg, reg);
}

static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
	if (sizeof(u64) == sizeof(long))
		return _hypercall2(int, update_descriptor, ma, desc);
	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}

static inline int
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
	return _hypercall2(int, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, int nr_calls)
{
	return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			     unsigned long flags)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall3(int, update_va_mapping, va,
				   new_val.pte, flags);
	else
		return _hypercall4(int, update_va_mapping, va,
				   new_val.pte, new_val.pte >> 32, flags);
}
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, event_channel_op, cmd, arg);
	if (unlikely(rc == -ENOSYS)) {
		struct evtchn_op op;
		op.cmd = cmd;
		memcpy(&op.u, arg, sizeof(op.u));
		rc = _hypercall1(int, event_channel_op_compat, &op);
		memcpy(arg, &op.u, sizeof(op.u));
	}
	return rc;
}
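/*
 * The -ENOSYS fallback above retries through the older single-argument
 * event_channel_op_compat interface for hypervisors that predate the
 * two-argument event_channel_op; HYPERVISOR_physdev_op() below does the
 * same dance with physdev_op_compat.
 */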
static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return _hypercall3(int, console_io, cmd, count, str);
}

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	int rc = _hypercall2(int, physdev_op, cmd, arg);
	if (unlikely(rc == -ENOSYS)) {
		struct physdev_op op;
		op.cmd = cmd;
		memcpy(&op.u, arg, sizeof(op.u));
		rc = _hypercall1(int, physdev_op_compat, &op);
		memcpy(arg, &op.u, sizeof(op.u));
	}
	return rc;
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	return _hypercall3(int, grant_table_op, cmd, uop, count);
}
static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
					 unsigned long flags, domid_t domid)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall4(int, update_va_mapping_otherdomain, va,
				   new_val.pte, flags, domid);
	else
		return _hypercall5(int, update_va_mapping_otherdomain, va,
				   new_val.pte, new_val.pte >> 32,
				   flags, domid);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}

#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
	return _hypercall2(int, set_segment_base, reg, value);
}
#endif
static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

	/*
	 * For a PV guest the tools require that the start_info mfn be
	 * present in rdx/edx when the hypercall is made. Per the
	 * hypercall calling convention this is the third hypercall
	 * argument, which is start_info_mfn here.
	 */
	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
	return _hypercall2(int, nmi_op, op, arg);
}

static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
	return _hypercall2(unsigned long, hvm_op, op, arg);
}

static inline int
HYPERVISOR_tmem_op(
	struct tmem_op *op)
{
	return _hypercall1(int, tmem_op, op);
}
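/*
 * The MULTI_* helpers below only fill in a struct multicall_entry and
 * emit a tracepoint; the batched entries are submitted to the hypervisor
 * later in one go via HYPERVISOR_multicall by the multicall batching code.
 */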
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
	mcl->op = __HYPERVISOR_fpu_taskswitch;
	mcl->args[0] = set;

	trace_xen_mc_entry(mcl, 1);
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			pte_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}

static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
		     void *uop, unsigned int count)
{
	mcl->op = __HYPERVISOR_grant_table_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)uop;
	mcl->args[2] = count;

	trace_xen_mc_entry(mcl, 3);
}
static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
				    pte_t new_val, unsigned long flags,
				    domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
		mcl->args[3] = domid;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
		mcl->args[4] = domid;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}

static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
{
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
		mcl->args[1] = *(unsigned long *)&desc;
	} else {
		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
		mcl->args[2] = desc.a;
		mcl->args[3] = desc.b;
	}

	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}

static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
	mcl->op = __HYPERVISOR_memory_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)arg;

	trace_xen_mc_entry(mcl, 2);
}
static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
		 int count, int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)req;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
		int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmuext_op;
	mcl->args[0] = (unsigned long)op;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
	mcl->op = __HYPERVISOR_set_gdt;
	mcl->args[0] = (unsigned long)frames;
	mcl->args[1] = entries;

	trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
		   unsigned long ss, unsigned long esp)
{
	mcl->op = __HYPERVISOR_stack_switch;
	mcl->args[0] = ss;
	mcl->args[1] = esp;

	trace_xen_mc_entry(mcl, 2);
}

#endif /* _ASM_X86_XEN_HYPERCALL_H */