/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS       0x01
#define PACA_IRQ_DBELL          0x02
#define PACA_IRQ_EE             0x04
#define PACA_IRQ_DEC            0x08 /* Or FIT */
#define PACA_IRQ_HMI            0x10
#define PACA_IRQ_PMI            0x20

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
#endif
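
/*
 * Illustrative sketch (editor's addition, not part of this header): when a
 * maskable interrupt fires while soft-disabled, the low-level masked
 * handler latches it in paca->irq_happened so arch_local_irq_restore() can
 * replay it once the soft mask clears, roughly:
 *
 *	local_paca->irq_happened |= PACA_IRQ_DEC;	// e.g. decrementer seen
 *	if (local_paca->irq_happened & PACA_IRQ_MUST_HARD_MASK)
 *		;	// source not cleared: EE must stay off until replay
 */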

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED            0
#define IRQS_DISABLED           1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED       2
#define IRQS_ALL_DISABLED       (IRQS_DISABLED | IRQS_PMI_DISABLED)
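
/*
 * A minimal illustration (editor's addition): the soft mask is a bitmask,
 * so IRQS_ALL_DISABLED is 3 and each class can be tested on its own:
 *
 *	unsigned long mask = irq_soft_mask_return();
 *
 *	if (mask & IRQS_PMI_DISABLED)
 *		;	// PMIs soft-masked, whether or not IRQS_DISABLED is set
 */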

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void replay_soft_interrupts(void);

extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
        unsigned long flags;

        asm volatile(
                "lbz %0,%1(13)"
                : "=r" (flags)
                : "i" (offsetof(struct paca_struct, irq_soft_mask)));

        return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
        /*
         * The irq mask must always include the STD bit if any are set,
         * and interrupts don't get replayed until the standard
         * interrupt (local_irq_disable()) is unmasked.
         *
         * Other masks must only provide additional masking beyond
         * the standard, and they are also not replayed until the
         * standard interrupt becomes unmasked.
         *
         * This could be changed, but it will require partial
         * unmasks to be replayed, among other things. For now, take
         * the simple approach.
         */
        WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

        asm volatile(
                "stb %0,%1(13)"
                :
                : "r" (mask),
                  "i" (offsetof(struct paca_struct, irq_soft_mask))
                : "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
        unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
        WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

        asm volatile(
                "lbz %0,%1(13); stb %2,%1(13)"
                : "=&r" (flags)
                : "i" (offsetof(struct paca_struct, irq_soft_mask)),
                  "r" (mask)
                : "memory");

        return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
        unsigned long flags, tmp;

        asm volatile(
                "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
                : "=&r" (flags), "=r" (tmp)
                : "i" (offsetof(struct paca_struct, irq_soft_mask)),
                  "r" (mask)
                : "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
        WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

        return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
        return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
        irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
        arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
        return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
        return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
        return arch_irqs_disabled_flags(arch_local_save_flags());
}
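
/*
 * Hedged usage sketch (editor's addition): these arch_* hooks back the
 * generic local_irq_save()/local_irq_restore() in include/linux/irqflags.h.
 * A critical section therefore only toggles the byte in the PACA; MSR[EE]
 * normally stays set and is only hard-disabled if an interrupt arrives:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	// soft mask := IRQS_DISABLED
 *	...				// critical section
 *	arch_local_irq_restore(flags);	// replays anything latched meanwhile
 */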

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a new set
 * of powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * functions is added. These macros are implemented using the generic
 * local_irq_* code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)                                   \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = irq_soft_mask_or_return(IRQS_DISABLED |         \
                                IRQS_PMI_DISABLED);                     \
        } while(0)

#define raw_local_irq_pmu_restore(flags)                                \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                arch_local_irq_restore(flags);                          \
        } while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)                       \
        do {                                                    \
                raw_local_irq_pmu_save(flags);                  \
                if (!raw_irqs_disabled_flags(flags))            \
                        trace_hardirqs_off();                   \
        } while(0)
#define powerpc_local_irq_pmu_restore(flags)                    \
        do {                                                    \
                if (!raw_irqs_disabled_flags(flags))            \
                        trace_hardirqs_on();                    \
                raw_local_irq_pmu_restore(flags);               \
        } while(0)
#else
#define powerpc_local_irq_pmu_save(flags)                       \
        do {                                                    \
                raw_local_irq_pmu_save(flags);                  \
        } while(0)
#define powerpc_local_irq_pmu_restore(flags)                    \
        do {                                                    \
                raw_local_irq_pmu_restore(flags);               \
        } while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */
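
/*
 * Illustrative caller pattern (editor's addition, assumed rather than
 * defined here): perf-style code brackets PMU register access with these
 * so neither regular interrupts nor PMIs can race with the update:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	...				// touch PMU SPRs safely
 *	powerpc_local_irq_pmu_restore(flags);
 */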

#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()     wrtee(MSR_EE)
#define __hard_irq_disable()    wrtee(0)
#define __hard_EE_RI_disable()  wrtee(0)
#define __hard_RI_enable()      do { } while (0)
#else
#define __hard_irq_enable()     __mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable()    __mtmsrd(MSR_RI, 1)
#define __hard_EE_RI_disable()  __mtmsrd(0, 1)
#define __hard_RI_enable()      __mtmsrd(MSR_RI, 1)
#endif

#define hard_irq_disable()      do {                                    \
        unsigned long flags;                                            \
        __hard_irq_disable();                                           \
        flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);            \
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;                  \
        if (!arch_irqs_disabled_flags(flags)) {                         \
                asm ("stdx %%r1, 0, %1 ;"                               \
                     : "=m" (local_paca->saved_r1)                      \
                     : "b" (&local_paca->saved_r1));                    \
                trace_hardirqs_off();                                   \
        }                                                               \
} while(0)
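
/*
 * Editor's gloss, not kernel documentation: unlike arch_local_irq_disable(),
 * which only writes the soft mask, hard_irq_disable() also clears MSR[EE]
 * and records PACA_IRQ_HARD_DIS so arch_local_irq_restore() knows hard
 * interrupts must be re-enabled. The stdx stashes r1 (the stack pointer) in
 * paca->saved_r1 as a debugging breadcrumb on the enabled->disabled
 * transition:
 *
 *	hard_irq_disable();
 *	...			// nothing maskable can run here
 *	local_irq_enable();	// clears PACA_IRQ_HARD_DIS, replays, sets EE
 */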

static inline bool __lazy_irq_pending(u8 irq_happened)
{
        return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
        return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
        return __lazy_irq_pending(local_paca->irq_happened);
}
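
/*
 * Hedged example of the intended call pattern (modelled on the idle entry
 * code; the low-power call is illustrative):
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return;		// an interrupt was latched: skip sleeping,
 *				// it is replayed when interrupts come back on
 *	enter_low_power_state();
 */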

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
        if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
                get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
                __hard_irq_enable();
        }
}
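
/*
 * Sketch of where this sits in an interrupt path (editor's addition; the
 * handler and ack function are hypothetical):
 *
 *	void example_async_handler(struct pt_regs *regs)
 *	{
 *		ack_interrupt_source();		// clear the source first
 *		may_hard_irq_enable();		// EE back on only if safe
 *		...				// bulk of the handler
 *	}
 */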

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
        return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

#else /* CONFIG_PPC64 */

static inline unsigned long arch_local_save_flags(void)
{
        return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
        if (IS_ENABLED(CONFIG_BOOKE))
                wrtee(flags);
        else
                mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();

        if (IS_ENABLED(CONFIG_BOOKE))
                wrtee(0);
        else if (IS_ENABLED(CONFIG_PPC_8xx))
                wrtspr(SPRN_EID);
        else
                mtmsr(flags & ~MSR_EE);

        return flags;
}

static inline void arch_local_irq_disable(void)
{
        if (IS_ENABLED(CONFIG_BOOKE))
                wrtee(0);
        else if (IS_ENABLED(CONFIG_PPC_8xx))
                wrtspr(SPRN_EID);
        else
                mtmsr(mfmsr() & ~MSR_EE);
}

static inline void arch_local_irq_enable(void)
{
        if (IS_ENABLED(CONFIG_BOOKE))
                wrtee(MSR_EE);
        else if (IS_ENABLED(CONFIG_PPC_8xx))
                wrtspr(SPRN_EIE);
        else
                mtmsr(mfmsr() | MSR_EE);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
        return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
        return arch_irqs_disabled_flags(arch_local_save_flags());
}
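
/*
 * Editor's sketch: on 32-bit there is no soft mask, so the flags word
 * really is the MSR and "disabled" simply means MSR[EE] is clear:
 *
 *	unsigned long flags = arch_local_irq_save();	// old MSR value
 *	...
 *	arch_local_irq_restore(flags);	// mtmsr/wrtee the old value back
 */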

#define hard_irq_disable()              arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
        return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS     IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */