/*
 * linux/arch/unicore32/include/asm/system.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12 #ifndef __UNICORE_SYSTEM_H__
13 #define __UNICORE_SYSTEM_H__
20 #define CR_M (1 << 0) /* MMU enable */
21 #define CR_A (1 << 1) /* Alignment abort enable */
22 #define CR_D (1 << 2) /* Dcache enable */
23 #define CR_I (1 << 3) /* Icache enable */
24 #define CR_B (1 << 4) /* Dcache write mechanism: write back */
25 #define CR_T (1 << 5) /* Burst enable */
26 #define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
30 #include <linux/linkage.h>
31 #include <linux/irqflags.h>
/* Fatal-trap reporting; regs is the faulting context, err the error code. */
void die(const char *msg, struct pt_regs *regs, int err);

void uc32_notify_die(const char *str, struct pt_regs *regs,
		struct siginfo *info, unsigned long err, unsigned long trap);

/*
 * Install fault handler fn for fault code nr; sig/code/name describe the
 * signal delivered when the handler does not resolve the fault.
 * NOTE(review): the callback's parameter list was truncated in this copy;
 * restored as (unsigned long, unsigned int, struct pt_regs *) — confirm
 * against arch/unicore32/mm/fault.c.
 */
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, int code, const char *name);
/* Type-preserving atomic exchange wrapper around __xchg() (defined below). */
#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
51 extern asmlinkage
void __backtrace(void);
52 extern asmlinkage
void c_backtrace(unsigned long fp
, int pmode
);
55 extern void show_pte(struct mm_struct
*mm
, unsigned long addr
);
56 extern void __show_regs(struct pt_regs
*);
58 extern int cpu_architecture(void);
59 extern void cpu_init(void);
/* True when the vector table is relocated high (CR_V set in cr_alignment). */
#define vectors_high()	(cr_alignment & CR_V)

/* isb/dsb/dmb expand to compiler-only barriers on this architecture. */
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("" : : : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")

#define mb()				barrier()
#define rmb()				barrier()
#define wmb()				barrier()
#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

/* Assign then order subsequent accesses after the store. */
#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
extern unsigned long cr_no_alignment;	/* defined in entry-unicore.S */
extern unsigned long cr_alignment;	/* defined in entry-unicore.S */
82 static inline unsigned int get_cr(void)
85 asm("movc %0, p0.c1, #0" : "=r" (val
) : : "cc");
89 static inline void set_cr(unsigned int val
)
91 asm volatile("movc p0.c1, %0, #0 @set CR"
92 : : "r" (val
) : "cc");
/* Clear the bits in mask, then OR in set, in the cached CR value. */
extern void adjust_cr(unsigned long mask, unsigned long set);
/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *,
		struct thread_info *, struct thread_info *);
extern void panic(const char *fmt, ...);
/* Context switch; `last' receives the task we switched away from. */
#define switch_to(prev, next, last)					\
	do {								\
		last = __switch_to(prev,				\
			task_thread_info(prev), task_thread_info(next)); \
	} while (0)
113 static inline unsigned long
114 __xchg(unsigned long x
, volatile void *ptr
, int size
)
120 asm volatile("@ __xchg1\n"
121 " swapb %0, %1, [%2]"
127 asm volatile("@ __xchg4\n"
128 " swapw %0, %1, [%2]"
134 panic("xchg: bad data size: ptr 0x%p, size %d\n",
141 #include <asm-generic/cmpxchg-local.h>
144 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
147 #define cmpxchg_local(ptr, o, n) \
148 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
149 (unsigned long)(o), (unsigned long)(n), sizeof(*(ptr))))
150 #define cmpxchg64_local(ptr, o, n) \
151 __cmpxchg64_local_generic((ptr), (o), (n))
153 #include <asm-generic/cmpxchg.h>
155 #endif /* __ASSEMBLY__ */
157 #define arch_align_stack(x) (x)
159 #endif /* __KERNEL__ */