x86: unify pageattr_32.c and pageattr_64.c
[wrt350n-kernel.git] / include / asm-x86 / irqflags.h
blob92021c1ffa3ae8282f4a1dcfd685e31ba03c4f5c
1 #ifndef _X86_IRQFLAGS_H_
2 #define _X86_IRQFLAGS_H_
4 #include <asm/processor-flags.h>
6 #ifndef __ASSEMBLY__
7 /*
8 * Interrupt control:
9 */
11 static inline unsigned long native_save_fl(void)
13 unsigned long flags;
15 __asm__ __volatile__(
16 "# __raw_save_flags\n\t"
17 "pushf ; pop %0"
18 : "=g" (flags)
19 : /* no input */
20 : "memory"
23 return flags;
26 static inline void native_restore_fl(unsigned long flags)
28 __asm__ __volatile__(
29 "push %0 ; popf"
30 : /* no output */
31 :"g" (flags)
32 :"memory", "cc"
36 static inline void native_irq_disable(void)
38 asm volatile("cli": : :"memory");
41 static inline void native_irq_enable(void)
43 asm volatile("sti": : :"memory");
46 static inline void native_safe_halt(void)
48 asm volatile("sti; hlt": : :"memory");
51 static inline void native_halt(void)
53 asm volatile("hlt": : :"memory");
56 #endif
58 #ifdef CONFIG_PARAVIRT
59 #include <asm/paravirt.h>
60 #else
61 #ifndef __ASSEMBLY__
/* Raw (non-paravirt) flavour: read the current flags word. */
static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}
/* Raw (non-paravirt) flavour: write @flags back to the flags register. */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
/* Raw (non-paravirt) flavour: disable interrupts on this CPU. */
static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}
/* Raw (non-paravirt) flavour: enable interrupts on this CPU. */
static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline void raw_safe_halt(void)
{
	native_safe_halt();
}
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 */
static inline void halt(void)
{
	native_halt();
}
/*
 * For spinlocks, etc: snapshot the current flags word, then disable
 * interrupts.  The caller later undoes this via raw_local_irq_restore()
 * with the returned value.
 */
static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return flags;
}
112 #else
114 #define ENABLE_INTERRUPTS(x) sti
115 #define DISABLE_INTERRUPTS(x) cli
117 #ifdef CONFIG_X86_64
118 #define INTERRUPT_RETURN iretq
119 #define ENABLE_INTERRUPTS_SYSCALL_RET \
120 movq %gs:pda_oldrsp, %rsp; \
121 swapgs; \
122 sysretq;
123 #else
124 #define INTERRUPT_RETURN iret
125 #define ENABLE_INTERRUPTS_SYSCALL_RET sti; sysexit
126 #define GET_CR0_INTO_EAX movl %cr0, %eax
127 #endif
130 #endif /* __ASSEMBLY__ */
131 #endif /* CONFIG_PARAVIRT */
133 #ifndef __ASSEMBLY__
134 #define raw_local_save_flags(flags) \
135 do { (flags) = __raw_local_save_flags(); } while (0)
137 #define raw_local_irq_save(flags) \
138 do { (flags) = __raw_local_irq_save(); } while (0)
140 static inline int raw_irqs_disabled_flags(unsigned long flags)
142 return !(flags & X86_EFLAGS_IF);
/* Return nonzero when interrupts are currently disabled on this CPU. */
static inline int raw_irqs_disabled(void)
{
	unsigned long flags = __raw_local_save_flags();

	return raw_irqs_disabled_flags(flags);
}
/*
 * makes the traced hardirq state match with the machine state
 *
 * should be a rarely used function, only in places where its
 * otherwise impossible to know the irq state, like in traps.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
	if (raw_irqs_disabled_flags(flags))
		trace_hardirqs_off();
	else
		trace_hardirqs_on();
}
/* Resynchronize the tracer with the CPU's *current* interrupt state. */
static inline void trace_hardirqs_fixup(void)
{
	unsigned long flags = __raw_local_save_flags();

	trace_hardirqs_fixup_flags(flags);
}
173 #else
175 #ifdef CONFIG_X86_64
177 * Currently paravirt can't handle swapgs nicely when we
178 * don't have a stack we can rely on (such as a user space
179 * stack). So we either find a way around these or just fault
180 * and emulate if a guest tries to call swapgs directly.
182 * Either way, this is a good way to document that we don't
183 * have a reliable stack. x86_64 only.
185 #define SWAPGS_UNSAFE_STACK swapgs
186 #define ARCH_TRACE_IRQS_ON call trace_hardirqs_on_thunk
187 #define ARCH_TRACE_IRQS_OFF call trace_hardirqs_off_thunk
188 #define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
189 #define ARCH_LOCKDEP_SYS_EXIT_IRQ \
190 TRACE_IRQS_ON; \
191 sti; \
192 SAVE_REST; \
193 LOCKDEP_SYS_EXIT; \
194 RESTORE_REST; \
195 cli; \
196 TRACE_IRQS_OFF;
198 #else
199 #define ARCH_TRACE_IRQS_ON \
200 pushl %eax; \
201 pushl %ecx; \
202 pushl %edx; \
203 call trace_hardirqs_on; \
204 popl %edx; \
205 popl %ecx; \
206 popl %eax;
208 #define ARCH_TRACE_IRQS_OFF \
209 pushl %eax; \
210 pushl %ecx; \
211 pushl %edx; \
212 call trace_hardirqs_off; \
213 popl %edx; \
214 popl %ecx; \
215 popl %eax;
217 #define ARCH_LOCKDEP_SYS_EXIT \
218 pushl %eax; \
219 pushl %ecx; \
220 pushl %edx; \
221 call lockdep_sys_exit; \
222 popl %edx; \
223 popl %ecx; \
224 popl %eax;
226 #define ARCH_LOCKDEP_SYS_EXIT_IRQ
227 #endif
229 #ifdef CONFIG_TRACE_IRQFLAGS
230 # define TRACE_IRQS_ON ARCH_TRACE_IRQS_ON
231 # define TRACE_IRQS_OFF ARCH_TRACE_IRQS_OFF
232 #else
233 # define TRACE_IRQS_ON
234 # define TRACE_IRQS_OFF
235 #endif
236 #ifdef CONFIG_DEBUG_LOCK_ALLOC
237 # define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT
238 # define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ
239 # else
240 # define LOCKDEP_SYS_EXIT
241 # define LOCKDEP_SYS_EXIT_IRQ
242 # endif
244 #endif /* __ASSEMBLY__ */
245 #endif