fs: use kmem_cache_zalloc instead
[pv_ops_mirror.git] / include / asm-x86 / irqflags_32.h
blobd058b04e0083a2e1aa02f59b6ea6c42987188f8a
1 /*
2 * include/asm-i386/irqflags.h
4 * IRQ flags handling
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10 #ifndef _ASM_IRQFLAGS_H
11 #define _ASM_IRQFLAGS_H
12 #include <asm/processor-flags.h>
14 #ifndef __ASSEMBLY__
15 static inline unsigned long native_save_fl(void)
17 unsigned long f;
18 asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
19 return f;
22 static inline void native_restore_fl(unsigned long f)
24 asm volatile("pushl %0 ; popfl": /* no output */
25 :"g" (f)
26 :"memory", "cc");
29 static inline void native_irq_disable(void)
31 asm volatile("cli": : :"memory");
34 static inline void native_irq_enable(void)
36 asm volatile("sti": : :"memory");
39 static inline void native_safe_halt(void)
41 asm volatile("sti; hlt": : :"memory");
44 static inline void native_halt(void)
46 asm volatile("hlt": : :"memory");
48 #endif /* __ASSEMBLY__ */
50 #ifdef CONFIG_PARAVIRT
51 #include <asm/paravirt.h>
52 #else
53 #ifndef __ASSEMBLY__
/* Non-paravirt case: reading the flags is just a native EFLAGS read. */
static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}
/* Restore a flags word previously saved by __raw_local_save_flags(). */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
/* Disable interrupts on the local CPU. */
static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}
/* Enable interrupts on the local CPU. */
static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
	native_safe_halt();
}
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline void halt(void)
{
	native_halt();
}
/*
 * For spinlocks, etc: save the current flags, then disable interrupts.
 * The returned word is later handed to raw_local_irq_restore().
 */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return flags;
}
105 #else
/*
 * Assembler-level fallbacks for low-level entry code when
 * CONFIG_PARAVIRT is not set: plain cli/sti/iret/sysexit.
 */
#define DISABLE_INTERRUPTS(clobbers) cli
#define ENABLE_INTERRUPTS(clobbers) sti
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
#define INTERRUPT_RETURN iret
#define GET_CR0_INTO_EAX movl %cr0, %eax
111 #endif /* __ASSEMBLY__ */
112 #endif /* CONFIG_PARAVIRT */
114 #ifndef __ASSEMBLY__
/* Statement-style wrappers: store the saved flags into @flags. */
#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)

/* Save flags into @flags and disable interrupts. */
#define raw_local_irq_save(flags) \
	do { (flags) = __raw_local_irq_save(); } while (0)
121 static inline int raw_irqs_disabled_flags(unsigned long flags)
123 return !(flags & X86_EFLAGS_IF);
/* Nonzero if interrupts are currently disabled on this CPU. */
static inline int raw_irqs_disabled(void)
{
	unsigned long flags = __raw_local_save_flags();

	return raw_irqs_disabled_flags(flags);
}
132 #endif /* __ASSEMBLY__ */
/*
 * Do the CPU's IRQ-state tracing from assembly code. We call a
 * C function, so save all the C-clobbered registers:
 */
#ifdef CONFIG_TRACE_IRQFLAGS

# define TRACE_IRQS_ON				\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call trace_hardirqs_on;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

# define TRACE_IRQS_OFF				\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call trace_hardirqs_off;		\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

#else
/* Tracing disabled: the markers expand to nothing. */
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
/*
 * Call lockdep_sys_exit from assembly on the syscall-exit path,
 * preserving the C-clobbered registers around the call.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCKDEP_SYS_EXIT			\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;
#else
/* Lock debugging disabled: expands to nothing. */
# define LOCKDEP_SYS_EXIT
#endif
176 #endif