/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/thread_info.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
check_events:
        push %eax
        push %ecx
        push %edx
        call xen_force_evtchn_callback
        pop %edx
        pop %ecx
        pop %eax
        ret

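/*
 * For reference, the C-side callee is roughly the following sketch
 * (not necessarily the exact kernel source): any hypercall traps into
 * the hypervisor, which re-checks pending event channels on its way
 * back into the guest, so a cheap no-op hypercall is enough.
 *
 *	void xen_force_evtchn_callback(void)
 *	{
 *		(void)HYPERVISOR_xen_version(0, NULL);
 *	}
 */
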
/*
 * We can't use sysexit directly, because we're not running in ring0.
 * But we can easily fake it up using iret.  Assuming xen_sysexit is
 * jumped to with a standard stack frame, we can just strip it back to
 * a standard iret frame and use iret.
 */

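/*
 * Roughly what the code below does to the stack (a sketch, assuming
 * the usual 32-bit pt_regs layout, with PT_* offsets coming from
 * asm-offsets):
 *
 *	before				after "lea PT_EIP(%esp), %esp"
 *	 ...saved regs...		 eip	<- esp (standard iret frame)
 *	 eip	<- esp+PT_EIP		 cs
 *	 cs				 eflags (with IF forced on)
 *	 eflags				 ...
 *
 * i.e. everything below the eip/cs/eflags triple is discarded, and the
 * saved EFLAGS image gets IF set so events are re-enabled on return.
 */
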
ENTRY(xen_sysexit)
        movl PT_EAX(%esp), %eax         /* Shouldn't be necessary? */
        orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
        lea PT_EIP(%esp), %esp

        jmp xen_iret
ENDPROC(xen_sysexit)

/*
 * This is run where a normal iret would be run, with the same stack setup:
 *	eip : cs : eflags (top of stack first; esp and ss follow if we
 *	came from usermode)
 *
 * This attempts to make sure that any pending events are dealt with
 * on return to usermode, but there is a small window in which an
 * event can happen just before entering usermode.  If the nested
 * interrupt ends up setting one of the TIF_WORK_MASK pending work
 * flags, they will not be tested again before returning to
 * usermode.  This means that a process can end up with pending work,
 * which will be unprocessed until the process enters and leaves the
 * kernel again, which could be an unbounded amount of time.  This
 * means that a pending signal or reschedule event could be
 * indefinitely delayed.
 *
 * The fix is to notice a nested interrupt in the critical window, and
 * if one occurs, then fold the nested interrupt into the current
 * interrupt stack frame, and re-process it iteratively rather than
 * recursively.  This means that it will exit via the normal path, and
 * all pending work will be dealt with appropriately.
 *
 * Because the nested interrupt handler needs to deal with the current
 * stack state in whatever form it's in, we keep things simple by only
 * using a single register which is pushed/popped on the stack.
 */

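/*
 * The flow below, as a C sketch (it has to be asm in reality: it runs
 * on the return-to-user path with the iret frame on the stack and no
 * free scratch registers; "vcpu" is this CPU's Xen vcpu_info):
 *
 *	vcpu->evtchn_upcall_mask = !(regs->eflags & X86_EFLAGS_IF);
 *	// critical region: an event from here on re-enters the kernel
 *	if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending) {
 *		vcpu->evtchn_upcall_mask = 1;	// remask...
 *		goto xen_hypervisor_callback;	// ...and take the event now
 *	}
 *	iret;
 */
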
ENTRY(xen_iret)
        /* test eflags for special cases */
        testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
        jnz hyper_iret                  /* VM86 or NMI: use the hypervisor's iret */

        push %eax
        ESP_OFFSET=4                    # bytes pushed onto stack

        /*
         * Store vcpu_info pointer for easy access.  Do it this way to
         * avoid having to reload %fs
         */
#ifdef CONFIG_SMP
        GET_THREAD_INFO(%eax)
        movl TI_cpu(%eax), %eax                 /* cpu number */
        movl __per_cpu_offset(,%eax,4), %eax    /* this CPU's per-cpu base */
        movl xen_vcpu(%eax), %eax               /* per-cpu xen_vcpu pointer */
#else
        movl xen_vcpu, %eax
#endif

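        /*
         * The loads above are just a hand-rolled version of (sketch):
         *
         *	vcpu = per_cpu(xen_vcpu, smp_processor_id());
         *
         * done manually because only %eax may be clobbered here.
         */
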
        /* check IF state we're restoring */
        testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)

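        /*
         * Offset arithmetic for the testb above: before the push %eax,
         * the frame was eip at 0(%esp), cs at 4, eflags at 8; the push
         * moved everything down by ESP_OFFSET (4).  IF is bit 9 of
         * eflags, i.e. bit 1 of its second byte, hence the extra +1
         * and the $X86_EFLAGS_IF>>8 mask.
         */
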
        /*
         * Maybe enable events.  Once this happens we could get a
         * recursive event, so the critical region starts immediately
         * afterwards.  However, if that happens we don't end up
         * resuming the code, so we don't have to be worried about
         * being preempted to another CPU.
         */
        setz XEN_vcpu_info_mask(%eax)
xen_iret_start_crit:

        /* check for unmasked and pending */
        cmpw $0x0001, XEN_vcpu_info_pending(%eax)

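        /*
         * The cmpw tests two adjacent vcpu_info bytes in one go:
         * evtchn_upcall_pending is the low byte of the word and
         * evtchn_upcall_mask the high byte, so the word equals 0x0001
         * exactly when an event is pending *and* events are unmasked.
         */
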
        /*
         * If there's something pending, mask events again so we can
         * jump back into xen_hypervisor_callback.
         */
        sete XEN_vcpu_info_mask(%eax)

        popl %eax                       /* restore the scratch reg pushed at entry */

        /*
         * From this point on the registers are restored and the stack
         * updated, so we don't need to worry about it if we're
         * preempted.
         */
iret_restore_end:

        /*
         * Jump to hypervisor_callback after fixing up the stack.
         * Events are masked, so jumping out of the critical region is
         * OK.
         */
        je xen_hypervisor_callback      /* ZF still set from the cmpw above */

1:      iret
xen_iret_end_crit:
.section __ex_table, "a"
        .align 4
        .long 1b, iret_exc
.previous

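        /*
         * The __ex_table entry above pairs the iret at the 1: label
         * with the iret_exc fixup in entry.S: if the iret itself
         * faults (e.g. restoring a bad user segment), the fault is
         * funnelled to that handler rather than killing the kernel.
         */
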
hyper_iret:
        /* put this out of line since it's very rarely used */
        jmp hypercall_page + __HYPERVISOR_iret * 32

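        /*
         * The hypercall page is an array of 32-byte call stubs, one
         * per hypercall number, so "+ __HYPERVISOR_iret * 32" lands
         * directly in the iret hypercall's stub; it never returns,
         * which is why a jmp rather than a call is used.
         */
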
        .globl xen_iret_start_crit, xen_iret_end_crit

/*
 * This is called by xen_hypervisor_callback in entry.S when it sees
 * that the EIP at the time of interrupt was between
 * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
 * %eax so we can do a more refined determination of what to do.
 *
 * The stack format at this point is:
 *	----------------
 *	 ss		: (ss/esp may be present if we came from usermode)
 *	 esp		:
 *	 eflags		}  outer exception info
 *	 cs		}
 *	 eip		}
 *	---------------- <- edi (copy dest)
 *	 eax		:  outer eax if it hasn't been restored
 *	----------------
 *	 eflags		}  nested exception info
 *	 cs		}   (no ss/esp because we're nested
 *	 eip		}    from the same ring)
 *	 orig_eax	}<- esi (copy src)
 *	 ds		}
 *	 es		}
 *	 fs		}  SAVE_ALL state
 *	 eax		}
 *	  :		:
 *	 ebx		}<- esp
 *	----------------
 *
 * In order to deliver the nested exception properly, we need to shift
 * everything from the return addr up to the error code so it sits
 * just under the outer exception info.  This means that when we
 * handle the exception, we do it in the context of the outer
 * exception rather than starting a new one.
 *
 * The only caveat is that if the outer eax hasn't been restored yet
 * (ie, it's still on stack), we need to insert its value into the
 * SAVE_ALL state before going on, since it's usermode state which we
 * eventually need to restore.
 */

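/*
 * Conceptually the fixup is an overlapping upward move of the whole
 * SAVE_ALL block, roughly (a C sketch; src/dst correspond to the
 * esi/edi markers in the diagram above):
 *
 *	if (eip < iret_restore_end)	// outer eax still on the stack
 *		regs->ax = outer_eax;	// fold it into SAVE_ALL state
 *	memmove(dst, src, PT_EIP);	// ebx..orig_eax, PT_EIP/4 longs
 *
 * Since dst overlaps src from above, the asm below copies high-to-low
 * (std; rep movsl; cld) rather than low-to-high.
 */
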
ENTRY(xen_iret_crit_fixup)
        /*
         * Paranoia: Make sure we're really coming from kernel space.
         * One could imagine a case where userspace jumps into the
         * critical range address, but just before the CPU delivers a
         * GP, it decides to deliver an interrupt instead.  Unlikely?
         * Definitely.  Easy to avoid?  Yes.  The Intel documents
         * explicitly say that the reported EIP for a bad jump is the
         * jump instruction itself, not the destination, but some
         * virtual environments get this wrong.
         */
        movl PT_CS(%esp), %ecx
        andl $SEGMENT_RPL_MASK, %ecx
        cmpl $USER_RPL, %ecx
        je 2f                           /* usermode frame: nothing to fix up */

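        /*
         * Note: a 32-bit Xen PV kernel runs in ring 1, so a
         * kernel-mode frame has CS RPL 1 here; comparing against
         * USER_RPL (3) is therefore a reliable came-from-usermode
         * test.
         */
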
        lea PT_ORIG_EAX(%esp), %esi     /* copy src (see diagram above) */
        lea PT_EFLAGS(%esp), %edi       /* copy dest */

        /*
         * If eip is before iret_restore_end then the stack
         * hasn't been restored yet.
         */
        cmp $iret_restore_end, %eax
        jae 1f

        movl 0+4(%edi), %eax            /* copy EAX (just above top of frame) */
        movl %eax, PT_EAX(%esp)

        lea ESP_OFFSET(%edi), %edi      /* move dest up over saved regs */

        /* set up the copy */
1:      std
        mov $PT_EIP / 4, %ecx           /* saved regs up to orig_eax */
        rep movsl
        cld

        /*
         * The copy ran high-to-low, so %edi was left one slot below
         * the last (lowest) dword written; 4(%edi) is therefore the
         * base of the relocated frame.
         */
        lea 4(%edi), %esp               /* point esp to new frame */
2:      jmp xen_do_upcall
ENDPROC(xen_iret_crit_fixup)