/* arch/sparc/kernel/kstack.h */

#ifndef _KSTACK_H
#define _KSTACK_H

#include <linux/thread_info.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
#include <asm/irq.h>

/* SP must be STACK_BIAS adjusted already. */
static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
{
	unsigned long base = (unsigned long) tp;

	/* Stack pointer must be 16-byte aligned. */
	if (sp & (16UL - 1))
		return false;

	/* Within the task's own kernel stack: above the thread_info at the
	 * base of the allocation, with room for at least one register
	 * window save area (struct sparc_stackf) before the top.
	 */
	if (sp >= (base + sizeof(struct thread_info)) &&
	    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
		return true;

	/* Otherwise it may lie on this CPU's hardirq or softirq stack. */
	if (hardirq_stack[tp->cpu]) {
		base = (unsigned long) hardirq_stack[tp->cpu];
		if (sp >= base &&
		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
			return true;
		base = (unsigned long) softirq_stack[tp->cpu];
		if (sp >= base &&
		    sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
			return true;
	}
	return false;
}
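
/*
 * Illustrative sketch (not part of the original header): roughly how the
 * sparc64 unwinders (e.g. arch/sparc/kernel/stacktrace.c) use kstack_valid()
 * while following saved frame pointers.  The function name and the printk
 * are hypothetical, so the sketch is kept under #if 0 and never built.
 */
#if 0
static void example_walk_stack(struct thread_info *tp, unsigned long ksp)
{
	/* ksp is a raw saved stack pointer; apply the stack bias first. */
	unsigned long fp = ksp + STACK_BIAS;

	while (kstack_valid(tp, fp)) {
		struct sparc_stackf *sf = (struct sparc_stackf *) fp;

		printk("pc: %lx\n", sf->callers_pc);
		fp = (unsigned long) sf->fp + STACK_BIAS;
	}
}
#endif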

/* Does "regs" point to a valid pt_regs trap frame? */
static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
{
	unsigned long base = (unsigned long) tp;
	unsigned long addr = (unsigned long) regs;

	/* The frame may sit on the task stack or on either IRQ stack. */
	if (addr >= base &&
	    addr <= (base + THREAD_SIZE - sizeof(*regs)))
		goto check_magic;

	if (hardirq_stack[tp->cpu]) {
		base = (unsigned long) hardirq_stack[tp->cpu];
		if (addr >= base &&
		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
			goto check_magic;
		base = (unsigned long) softirq_stack[tp->cpu];
		if (addr >= base &&
		    addr <= (base + THREAD_SIZE - sizeof(*regs)))
			goto check_magic;
	}
	return false;

check_magic:
	/* The low bits of ->magic carry the trap type, so mask them off
	 * before comparing against PT_REGS_MAGIC.
	 */
	if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
		return true;
	return false;
}
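
/*
 * Illustrative sketch (not part of the original header): a stack walker
 * typically probes for a trap frame stored directly above each stack frame
 * and, when one is found, continues from the interrupted context's saved
 * registers instead of the ordinary caller chain.  The function name is
 * hypothetical; kept under #if 0 so it never builds.
 */
#if 0
static unsigned long example_next_fp(struct thread_info *tp, unsigned long fp,
				     unsigned long *pc)
{
	struct sparc_stackf *sf = (struct sparc_stackf *) fp;
	struct pt_regs *regs = (struct pt_regs *) (sf + 1);

	if (kstack_is_trap_frame(tp, regs)) {
		*pc = regs->tpc;
		return regs->u_regs[UREG_I6] + STACK_BIAS;
	}
	*pc = sf->callers_pc;
	return (unsigned long) sf->fp + STACK_BIAS;
}
#endif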

static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
{
	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];

	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
	if (orig_sp < sp ||
	    orig_sp > (sp + THREAD_SIZE)) {
		/* Not already on the hardirq stack: point %sp at its top,
		 * below room for one stack frame (192 bytes) and the bias.
		 */
		sp += THREAD_SIZE - 192 - STACK_BIAS;
		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
	}

	return orig_sp;
}

static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
{
	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
}
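
/*
 * Illustrative sketch (not part of the original header): the intended
 * pairing of the two helpers above -- switch to the per-cpu hardirq stack,
 * do the interrupt work, then restore the original stack pointer, in the
 * style of handler_irq() in arch/sparc/kernel/irq_64.c.  The function name
 * and body are hypothetical; kept under #if 0 so it never builds.
 */
#if 0
static void example_handle_irq(void)
{
	void *orig_sp = set_hardirq_stack();

	/* ... dispatch pending interrupt handlers here ... */

	restore_hardirq_stack(orig_sp);
}
#endif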

#endif /* _KSTACK_H */