/* arch/powerpc/include/asm/hw_irq.h */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
4 #ifndef _ASM_POWERPC_HW_IRQ_H
5 #define _ASM_POWERPC_HW_IRQ_H
7 #ifdef __KERNEL__
9 #include <linux/errno.h>
10 #include <linux/compiler.h>
11 #include <asm/ptrace.h>
12 #include <asm/processor.h>
#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20

#endif /* CONFIG_PPC64 */
32 #ifndef __ASSEMBLY__
34 extern void __replay_interrupt(unsigned int vector);
36 extern void timer_interrupt(struct pt_regs *);
37 extern void performance_monitor_exception(struct pt_regs *regs);
38 extern void WatchdogException(struct pt_regs *regs);
39 extern void unknown_exception(struct pt_regs *regs);
41 #ifdef CONFIG_PPC64
42 #include <asm/paca.h>
44 static inline unsigned long arch_local_save_flags(void)
46 unsigned long flags;
48 asm volatile(
49 "lbz %0,%1(13)"
50 : "=r" (flags)
51 : "i" (offsetof(struct paca_struct, soft_enabled)));
53 return flags;
56 static inline unsigned long arch_local_irq_disable(void)
58 unsigned long flags, zero;
60 asm volatile(
61 "li %1,0; lbz %0,%2(13); stb %1,%2(13)"
62 : "=r" (flags), "=&r" (zero)
63 : "i" (offsetof(struct paca_struct, soft_enabled))
64 : "memory");
66 return flags;
/* Out-of-line: replays any interrupts that arrived while soft-disabled. */
extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(1);
}
/* Save-and-disable: returns the previous soft-enable state. */
static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}
/* flags == 0 means soft-disabled (see arch_local_save_flags()). */
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}
static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
/*
 * Hard enable/disable of MSR[EE]: BookE uses the wrteei instruction,
 * Book3S flips EE via mtmsrd (L=1 form touches only EE/RI).
 */
#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif
/*
 * Hard-disable and mark state consistent: soft-disabled with
 * PACA_IRQ_HARD_DIS recorded so a later restore knows EE was cleared.
 * Only call trace_hardirqs_off() on a real enabled->disabled edge.
 */
#define hard_irq_disable()	do {			\
	u8 _was_enabled;				\
	__hard_irq_disable();				\
	_was_enabled = local_paca->soft_enabled;	\
	local_paca->soft_enabled = 0;			\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;	\
	if (_was_enabled)				\
		trace_hardirqs_off();			\
} while(0)
109 static inline bool lazy_irq_pending(void)
111 return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
115 * This is called by asynchronous interrupts to conditionally
116 * re-enable hard interrupts when soft-disabled after having
117 * cleared the source of the interrupt
119 static inline void may_hard_irq_enable(void)
121 get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
122 if (!(get_paca()->irq_happened & PACA_IRQ_EE))
123 __hard_irq_enable();
126 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
128 return !regs->softe;
131 extern bool prep_irq_for_idle(void);
133 extern void force_external_irq_replay(void);
135 #else /* CONFIG_PPC64 */
137 #define SET_MSR_EE(x) mtmsr(x)
/* 32-bit: the IRQ "flags" are simply the MSR value. */
static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}
static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	/* BookE can write EE directly with wrtee. */
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}
153 static inline unsigned long arch_local_irq_save(void)
155 unsigned long flags = arch_local_save_flags();
156 #ifdef CONFIG_BOOKE
157 asm volatile("wrteei 0" : : : "memory");
158 #elif defined(CONFIG_PPC_8xx)
159 wrtspr(SPRN_EID);
160 #else
161 SET_MSR_EE(flags & ~MSR_EE);
162 #endif
163 return flags;
static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#elif defined(CONFIG_PPC_8xx)
	wrtspr(SPRN_EID);
#else
	/* Generic path: reuse save() purely for its disable side effect. */
	arch_local_irq_save();
#endif
}
177 static inline void arch_local_irq_enable(void)
179 #ifdef CONFIG_BOOKE
180 asm volatile("wrteei 1" : : : "memory");
181 #elif defined(CONFIG_PPC_8xx)
182 wrtspr(SPRN_EIE);
183 #else
184 unsigned long msr = mfmsr();
185 SET_MSR_EE(msr | MSR_EE);
186 #endif
189 static inline bool arch_irqs_disabled_flags(unsigned long flags)
191 return (flags & MSR_EE) == 0;
static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
199 #define hard_irq_disable() arch_local_irq_disable()
201 static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
203 return !(regs->msr & MSR_EE);
206 static inline void may_hard_irq_enable(void) { }
208 #endif /* CONFIG_PPC64 */
210 #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
213 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
214 * or should we not care like we do now ? --BenH.
216 struct irq_chip;
218 #endif /* __ASSEMBLY__ */
219 #endif /* __KERNEL__ */
220 #endif /* _ASM_POWERPC_HW_IRQ_H */