/*
 * include/asm-x86/irqflags.h — from Linux 2.6.25.20
 * (linux/fpc-iii.git, blob 0e2292483b3573f8963569431e2e71eb0b6b1391)
 */
1 #ifndef _X86_IRQFLAGS_H_
2 #define _X86_IRQFLAGS_H_
4 #include <asm/processor-flags.h>
6 #ifndef __ASSEMBLY__
7 /*
8 * Interrupt control:
9 */
/*
 * Read the CPU's (E/R)FLAGS register via pushf/pop.
 *
 * Returns the raw flags word; callers test X86_EFLAGS_IF in the
 * result to tell whether hardware interrupts are enabled.  The
 * "memory" clobber keeps the compiler from caching memory values
 * across the flags read.
 */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	__asm__ __volatile__(
		"# __raw_save_flags\n\t"
		"pushf ; pop %0"
		: "=g" (flags)
		: /* no input */
		: "memory"
	);

	return flags;
}
/*
 * Write @flags back to the (E/R)FLAGS register via push/popf.
 *
 * "cc" is clobbered because popf rewrites the condition codes;
 * "memory" serializes against compiler memory reordering so the
 * irq-enable/disable effect is properly ordered.
 */
static inline void native_restore_fl(unsigned long flags)
{
	__asm__ __volatile__(
		"push %0 ; popf"
		: /* no output */
		:"g" (flags)
		:"memory", "cc"
	);
}
36 static inline void native_irq_disable(void)
38 asm volatile("cli": : :"memory");
41 static inline void native_irq_enable(void)
43 asm volatile("sti": : :"memory");
46 static inline void native_safe_halt(void)
48 asm volatile("sti; hlt": : :"memory");
51 static inline void native_halt(void)
53 asm volatile("hlt": : :"memory");
56 #endif
58 #ifdef CONFIG_PARAVIRT
59 #include <asm/paravirt.h>
60 #else
61 #ifndef __ASSEMBLY__
/* Return the current flags word (native, non-paravirt path). */
static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}
/* Restore a flags word previously obtained from __raw_local_save_flags(). */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
73 #ifdef CONFIG_X86_VSMP
76 * Interrupt control for the VSMP architecture:
79 static inline void raw_local_irq_disable(void)
81 unsigned long flags = __raw_local_save_flags();
82 raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
85 static inline void raw_local_irq_enable(void)
87 unsigned long flags = __raw_local_save_flags();
88 raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
91 #else
93 static inline void raw_local_irq_disable(void)
95 native_irq_disable();
98 static inline void raw_local_irq_enable(void)
100 native_irq_enable();
103 #endif
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
	native_safe_halt();
}
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline void halt(void)
{
	native_halt();
}
/*
 * For spinlocks, etc:
 *
 * Returns the pre-disable flags word so the caller can later
 * hand it to raw_local_irq_restore().
 */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return flags;
}
134 #else
136 #define ENABLE_INTERRUPTS(x) sti
137 #define DISABLE_INTERRUPTS(x) cli
139 #ifdef CONFIG_X86_64
140 #define INTERRUPT_RETURN iretq
141 #define ENABLE_INTERRUPTS_SYSCALL_RET \
142 movq %gs:pda_oldrsp, %rsp; \
143 swapgs; \
144 sysretq;
145 #else
146 #define INTERRUPT_RETURN iret
147 #define ENABLE_INTERRUPTS_SYSCALL_RET sti; sysexit
148 #define GET_CR0_INTO_EAX movl %cr0, %eax
149 #endif
152 #endif /* __ASSEMBLY__ */
153 #endif /* CONFIG_PARAVIRT */
155 #ifndef __ASSEMBLY__
156 #define raw_local_save_flags(flags) \
157 do { (flags) = __raw_local_save_flags(); } while (0)
159 #define raw_local_irq_save(flags) \
160 do { (flags) = __raw_local_irq_save(); } while (0)
162 #ifdef CONFIG_X86_VSMP
163 static inline int raw_irqs_disabled_flags(unsigned long flags)
165 return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
167 #else
168 static inline int raw_irqs_disabled_flags(unsigned long flags)
170 return !(flags & X86_EFLAGS_IF);
172 #endif
/* Are interrupts disabled on this CPU right now? */
static inline int raw_irqs_disabled(void)
{
	unsigned long flags = __raw_local_save_flags();

	return raw_irqs_disabled_flags(flags);
}
/*
 * makes the traced hardirq state match with the machine state
 *
 * should be a rarely used function, only in places where its
 * otherwise impossible to know the irq state, like in traps.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
	if (raw_irqs_disabled_flags(flags))
		trace_hardirqs_off();
	else
		trace_hardirqs_on();
}

static inline void trace_hardirqs_fixup(void)
{
	unsigned long flags = __raw_local_save_flags();

	trace_hardirqs_fixup_flags(flags);
}
202 #else
204 #ifdef CONFIG_X86_64
206 * Currently paravirt can't handle swapgs nicely when we
207 * don't have a stack we can rely on (such as a user space
208 * stack). So we either find a way around these or just fault
209 * and emulate if a guest tries to call swapgs directly.
211 * Either way, this is a good way to document that we don't
212 * have a reliable stack. x86_64 only.
214 #define SWAPGS_UNSAFE_STACK swapgs
215 #define ARCH_TRACE_IRQS_ON call trace_hardirqs_on_thunk
216 #define ARCH_TRACE_IRQS_OFF call trace_hardirqs_off_thunk
217 #define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
218 #define ARCH_LOCKDEP_SYS_EXIT_IRQ \
219 TRACE_IRQS_ON; \
220 sti; \
221 SAVE_REST; \
222 LOCKDEP_SYS_EXIT; \
223 RESTORE_REST; \
224 cli; \
225 TRACE_IRQS_OFF;
227 #else
228 #define ARCH_TRACE_IRQS_ON \
229 pushl %eax; \
230 pushl %ecx; \
231 pushl %edx; \
232 call trace_hardirqs_on; \
233 popl %edx; \
234 popl %ecx; \
235 popl %eax;
237 #define ARCH_TRACE_IRQS_OFF \
238 pushl %eax; \
239 pushl %ecx; \
240 pushl %edx; \
241 call trace_hardirqs_off; \
242 popl %edx; \
243 popl %ecx; \
244 popl %eax;
246 #define ARCH_LOCKDEP_SYS_EXIT \
247 pushl %eax; \
248 pushl %ecx; \
249 pushl %edx; \
250 call lockdep_sys_exit; \
251 popl %edx; \
252 popl %ecx; \
253 popl %eax;
255 #define ARCH_LOCKDEP_SYS_EXIT_IRQ
256 #endif
/* Expand the tracing hooks only when the corresponding config is on. */
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		ARCH_TRACE_IRQS_ON
#  define TRACE_IRQS_OFF	ARCH_TRACE_IRQS_OFF
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
273 #endif /* __ASSEMBLY__ */
274 #endif