#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/proc-fns.h>
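/*
 * On 26-bit ARM machines the exception vectors live at physical
 * address zero, hence the constant below.
 */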
#define vectors_base()	(0)
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);

	switch (size) {
	case 1:		return cpu_xchg_1(x, ptr);
	case 4:		return cpu_xchg_4(x, ptr);
	default:	__bad_xchg(ptr, size);
	}
	return 0;
}
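/*
 * Note that __bad_xchg() is deliberately never defined anywhere, so an
 * xchg() on an unsupported operand size fails at link time instead of
 * misbehaving at run time.
 */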
/*
 * We need to turn the caches off before calling the reset vector - RiscOS
 * messes up if we don't.
 */
#define proc_hard_reset()	cpu_proc_fin()
/*
 * A couple of speedups for the ARM
 */
/* Enable IRQs (sti) */
#define local_irq_enable()					\
	do {							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
	"	mov	%0, pc		@ sti\n"		\
	"	bic	%0, %0, #0x08000000\n"			\
	"	teqp	%0, #0\n"				\
		: "=r" (temp) : : "memory");			\
	} while (0)
/* Disable IRQs (cli) */
#define local_irq_disable()					\
	do {							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
	"	mov	%0, pc		@ cli\n"		\
	"	orr	%0, %0, #0x08000000\n"			\
	"	teqp	%0, #0\n"				\
		: "=r" (temp) : : "memory");			\
	} while (0)
/* Disable FIQs (clf) */
#define __clf()							\
	do {							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
	"	mov	%0, pc		@ clf\n"		\
	"	orr	%0, %0, #0x04000000\n"			\
	"	teqp	%0, #0\n"				\
		: "=r" (temp));					\
	} while (0)
/* Enable FIQs (stf) */
#define __stf()							\
	do {							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
	"	mov	%0, pc		@ stf\n"		\
	"	bic	%0, %0, #0x04000000\n"			\
	"	teqp	%0, #0\n"				\
		: "=r" (temp));					\
	} while (0)
/*
 * save current IRQ & FIQ state
 */
#define local_save_flags(x)					\
	do {							\
		__asm__ __volatile__(				\
	"	mov	%0, pc		@ save_flags\n"		\
	"	and	%0, %0, #0x0c000000\n"			\
		: "=r" (x));					\
	} while (0)
/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	do {							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
	"	mov	%0, pc		@ save_flags_cli\n"	\
	"	orr	%1, %0, #0x08000000\n"			\
	"	and	%0, %0, #0x0c000000\n"			\
	"	teqp	%1, #0\n"				\
		: "=r" (x), "=r" (temp)			\
		: : "memory");					\
	} while (0)
/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	do {							\
		unsigned long temp;				\
		__asm__ __volatile__(				\
	"	mov	%0, pc		@ restore_flags\n"	\
	"	bic	%0, %0, #0x0c000000\n"			\
	"	orr	%0, %0, %1\n"				\
	"	teqp	%0, #0\n"				\
		: "=&r" (temp)					\
		: "r" (x)					\
		: "memory");					\
	} while (0)
/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));
void die_if_kernel(const char *str, struct pt_regs *regs, int err);
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
					struct pt_regs *),
		     int sig, const char *name);
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
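/*
 * Example: a minimal test-and-set spin built on tas() (an illustrative
 * sketch; the `lock' variable is hypothetical, not part of this header):
 *
 *	static volatile unsigned char lock;
 *
 *	while (tas(&lock) != 0)
 *		;			// spin until we observe 0
 *	// ... critical section ...
 *	lock = 0;			// release
 */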
extern asmlinkage void __backtrace(void);
/*
 * Include processor dependent parts
 */
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
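/*
 * mb()/rmb()/wmb() only need to be compiler barriers here: this port
 * runs on uniprocessor cores that issue memory accesses in order, so
 * there is no CPU reordering for a barrier to flush (an assumption
 * about the targeted 26-bit ARMs, not stated in the original).
 */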
#define nop()	__asm__ __volatile__("mov\tr0,r0\t@ nop\n\t")
#define prepare_to_switch()	do { } while (0)
/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.
 * The `mb()' is to tell GCC not to cache `current' across this call.
 */
struct thread_info;
struct task_struct;

extern struct task_struct *__switch_to(struct thread_info *, struct thread_info *);
#define switch_to(prev,next,last)					\
	do {								\
		__switch_to(prev->thread_info,next->thread_info);	\
		mb();							\
	} while (0)
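/*
 * Note that prev/next are task_struct pointers; the low-level
 * __switch_to() operates on their thread_info, and `last' is unused on
 * this architecture.
 */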
#ifdef CONFIG_SMP
#error SMP not supported
#endif /* CONFIG_SMP */
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	flags & 0x08000000;	/* PSR IRQ-disable bit */	\
})
#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define smp_mb()		barrier()
#define smp_rmb()		barrier()
#define smp_wmb()		barrier()
#define smp_read_barrier_depends()	do { } while (0)
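/*
 * With CONFIG_SMP rejected above, the smp_*() barriers never have to
 * order anything across CPUs; stopping compiler reordering is enough,
 * hence plain barrier()s and no-ops.
 */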
#define clf()			__clf()
#define stf()			__stf()
#endif /* __KERNEL__ */

#endif /* __ASM_ARM_SYSTEM_H */