/* include/asm-arm/system.h */
#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */
#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
#else
#define read_cpuid(reg) (processor_id)
#endif
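
/*
 * Illustrative sketch (not part of the original header): read_cpuid() is
 * normally handed one of the CPUID_* selectors above, e.g. to fetch the
 * main ID and cache type registers:
 *
 *	unsigned int id    = read_cpuid(CPUID_ID);
 *	unsigned int ctype = read_cpuid(CPUID_CACHETYPE);
 */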
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
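
/*
 * Illustrative sketch (an assumption, not taken from this file): __asmeq()
 * is concatenated ahead of an asm body that relies on an operand living in a
 * specific register, so the assembler rejects the build on a mismatch:
 *
 *	register unsigned long r0 asm("r0") = arg;	// hypothetical arg
 *	asm volatile(__asmeq("%0", "r0")
 *		     "swi	0"
 *		     : : "r" (r0) : "memory");
 */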
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);
/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif
#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)
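
/*
 * Illustrative sketch (assumed example, not from this file): the classic
 * producer/consumer pairing of the SMP barriers defined above.  "data" and
 * "flag" are hypothetical shared variables.
 *
 *	// producer			// consumer
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					use(data);
 */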
#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif
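
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * control register bits are normally changed read-modify-write, e.g. to
 * enable alignment fault checking:
 *
 *	set_cr(get_cr() | CR_A);
 */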
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
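
/*
 * Illustrative sketch (assumed usage, not taken from this file): granting
 * full access to coprocessors 10 and 11 (the VFP register banks) follows the
 * same read-modify-write pattern:
 *
 *	set_copro_access(get_copro_access() | CPACC_FULL(10) | CPACC_FULL(11));
 */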
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform the atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
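
/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * xchg() wrapper defined earlier resolves to this helper, atomically storing
 * a new value into a 1- or 4-byte object and returning the old contents:
 *
 *	static unsigned long lock;		// hypothetical flag word
 *	unsigned long old = xchg(&lock, 1);	// old == 0 means we got it
 */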
extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif