arch/x86/xen/irq.c
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"
/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
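/*
 * Note: XENVER_version (cmd 0) with a NULL argument is about the
 * cheapest hypercall available; it is used here purely for the trap
 * into Xen, which re-checks evtchn_upcall_pending on its return path
 * and delivers the upcall then.
 */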
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
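/*
 * Worked example of the conversion above (X86_EFLAGS_IF is bit 9,
 * i.e. 0x200):
 *
 *   mask == 0 (unmasked) -> flags = 1 -> -1 = 0xffff...ffff
 *                           -> & X86_EFLAGS_IF = 0x200  (IF set)
 *   mask == 1 (masked)   -> flags = 0 -> -0 = 0x0000...0000
 *                           -> & X86_EFLAGS_IF = 0x000  (IF clear)
 */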
static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* See xen_irq_enable() for why preemption must be disabled. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

	if (flags == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
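/*
 * Why the forced callback on the unmask path: an event that arrives
 * while evtchn_upcall_mask is set is only recorded in
 * evtchn_upcall_pending; no upcall is delivered.  After clearing the
 * mask, nothing would notice such a pending event until the next trap
 * into Xen, so we check the flag ourselves and force a callback if it
 * is set.  The barrier() stops the compiler from hoisting the pending
 * check above the unmask store.
 */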
static void xen_irq_disable(void)
{
	/* There's a one-instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
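/*
 * Masking needs none of the re-check dance above: an event arriving
 * after evtchn_upcall_mask is set simply stays marked in
 * evtchn_upcall_pending and is picked up by the next unmask.
 */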
static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/*
	 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
	 * cleared, so disable preemption to ensure we check for
	 * events on the VCPU we are still running on.
	 */
	preempt_disable();

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();

	preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}
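/*
 * halt() with interrupts disabled cannot simply use SCHEDOP_block,
 * since blocking implicitly re-enables event delivery (see
 * xen_safe_halt() above); instead the VCPU is taken offline with
 * VCPUOP_down, from which only VCPUOP_up can bring it back.
 */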
static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};
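/*
 * The PV_CALLEE_SAVE() entries point at the thunks generated by
 * PV_CALLEE_SAVE_REGS_THUNK() above; those thunks preserve the
 * caller-clobbered registers so that pv-ops call sites may use a
 * lighter calling convention for these hot irq primitives.
 */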
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}