/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include <linux/linkage.h>
/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN

	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */
	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_irq_enable_direct)
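
/*
 * Rough C equivalent of the above, as a sketch only (field names as
 * in struct vcpu_info from xen/interface/xen.h):
 *
 *	vcpu_info->evtchn_upcall_mask = 0;
 *	if (vcpu_info->evtchn_upcall_pending)
 *		check_events();
 */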
/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
SYM_FUNC_END(xen_irq_disable_direct)
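
/*
 * Note that, unlike enabling, disabling needs no pending check and no
 * hypercall: once the mask byte is non-zero, Xen will not deliver
 * event upcalls to this vcpu.
 */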
/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	ret
SYM_FUNC_END(xen_save_fl_direct)
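
/*
 * How the setz/addb pair implements the toggle described above: setz
 * sets %ah to 1 when the mask byte is zero (events enabled), and the
 * addb doubles it into bit 1 of %ah, which is bit 9 of %eax, i.e.
 * X86_EFLAGS_IF (0x200).
 */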
/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
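	/*
	 * Note the inverted sense: if X86_EFLAGS_IF is set in the
	 * argument (interrupts enabled), setz writes 0 to the Xen
	 * mask byte; if it is clear, setz writes 1 (masked).
	 */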
	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */
	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f

	call check_events
1:
	FRAME_END
	ret
SYM_FUNC_END(xen_restore_fl_direct)
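
/*
 * The cmpw above relies on struct vcpu_info placing the mask byte
 * immediately after the pending byte, so one little-endian 16-bit
 * compare against 0x0001 is true exactly when pending == 1 and
 * mask == 0, i.e. "unmasked and pending".
 */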
/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	ret
SYM_FUNC_END(check_events)
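
/*
 * Every C-ABI caller-clobbered register is saved around the call
 * above because the *_direct stubs are used with the callee-save
 * paravirt calling convention: their callers assume nothing is
 * clobbered, so the call into C must not be allowed to clobber
 * anything either.
 */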
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2);
SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	ret
SYM_FUNC_END(xen_read_cr2_direct);
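
/*
 * The difference between the two: xen_read_cr2 follows the percpu
 * xen_vcpu pointer, which may point at the shared-info page rather
 * than the percpu copy, while the _direct variant assumes the percpu
 * xen_vcpu_info copy is in use and skips the extra dereference.
 */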