include/asm-sh/system.h
#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <linux/irqflags.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>
#include <asm/ptrace.h>

struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);

#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */

/*
 * switch_to() should switch the CPU from task prev to task next; last
 * receives the task we actually switched away from, which may differ
 * from prev by the time the switched-out task is resumed.
 */
#define switch_to(prev, next, last) do {				\
 struct task_struct *__last;						\
 register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp;	\
 register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc;	\
 register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev;	\
 register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next;	\
 register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp;	\
 register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
 __asm__ __volatile__ (".balign 4\n\t"					\
		       "stc.l	gbr, @-r15\n\t"				\
		       "sts.l	pr, @-r15\n\t"				\
		       "mov.l	r8, @-r15\n\t"				\
		       "mov.l	r9, @-r15\n\t"				\
		       "mov.l	r10, @-r15\n\t"				\
		       "mov.l	r11, @-r15\n\t"				\
		       "mov.l	r12, @-r15\n\t"				\
		       "mov.l	r13, @-r15\n\t"				\
		       "mov.l	r14, @-r15\n\t"				\
		       "mov.l	r15, @r1	! save SP\n\t"		\
		       "mov.l	@r6, r15	! change to new stack\n\t" \
		       "mova	1f, %0\n\t"				\
		       "mov.l	%0, @r2		! save PC\n\t"		\
		       "mov.l	2f, %0\n\t"				\
		       "jmp	@%0		! call __switch_to\n\t"	\
		       " lds	r7, pr		!  with return to new PC\n\t" \
		       ".balign	4\n"					\
		       "2:\n\t"						\
		       ".long	__switch_to\n"				\
		       "1:\n\t"						\
		       "mov.l	@r15+, r14\n\t"				\
		       "mov.l	@r15+, r13\n\t"				\
		       "mov.l	@r15+, r12\n\t"				\
		       "mov.l	@r15+, r11\n\t"				\
		       "mov.l	@r15+, r10\n\t"				\
		       "mov.l	@r15+, r9\n\t"				\
		       "mov.l	@r15+, r8\n\t"				\
		       "lds.l	@r15+, pr\n\t"				\
		       "ldc.l	@r15+, gbr\n\t"				\
		       : "=z" (__last)					\
		       : "r" (__ts1), "r" (__ts2), "r" (__ts4),		\
			 "r" (__ts5), "r" (__ts6), "r" (__ts7)		\
		       : "r3", "t");					\
 last = __last;								\
} while (0)

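/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the scheduler's context-switch path is the intended caller of
 * switch_to(). It is invoked as switch_to(prev, next, prev) so that,
 * when the switched-out task eventually resumes, "prev" names the task
 * the CPU actually came from rather than a stale value.
 */
#if 0
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	/* ... mm and run-queue bookkeeping elided ... */
	switch_to(prev, next, prev);	/* execution resumes here as next */
	/* prev now refers to the task we really switched away from */
}
#endif
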
#ifdef CONFIG_CPU_SH4A
#define __icbi()			\
{					\
	unsigned long __addr;		\
	__addr = 0xa8000000;		\
	__asm__ __volatile__(		\
		"icbi %0\n\t"		\
		: /* no output */	\
		: "m" (__m(__addr)));	\
}
#endif

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while(0)
#else
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif

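/*
 * Illustrative sketch (not part of the original header, compiled out):
 * a control register update followed by ctrl_barrier(). EXAMPLE_MMUCR
 * is a hypothetical placeholder for the real register address, which
 * lives in the CPU-specific headers.
 */
#if 0
static inline void example_write_mmucr(unsigned long val)
{
	*(volatile unsigned long *)EXAMPLE_MMUCR = val;
	ctrl_barrier();	/* 8 nops on legacy parts, icbi on sh4a */
}
#endif
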
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

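/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the usual pairing of these barriers is a writer publishing data
 * behind a flag with smp_wmb(), matched by smp_rmb() on the reader
 * side. On !CONFIG_SMP builds both collapse to a compiler barrier.
 */
#if 0
static int example_data, example_ready;

static void example_writer(void)
{
	example_data = 42;
	smp_wmb();	/* order the data store before the flag store */
	example_ready = 1;
}

static int example_reader(void)
{
	if (!example_ready)
		return -1;
	smp_rmb();	/* order the flag load before the data load */
	return example_data;
}
#endif
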
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from the P2 area.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()					\
do {							\
	unsigned long __dummy;				\
	ctrl_barrier();					\
	__asm__ __volatile__(				\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)

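/*
 * Illustrative sketch (not part of the original header, compiled out):
 * cache or TLB manipulation must not run through the cacheable P1
 * mapping being modified, so callers bracket the critical stores with
 * jump_to_P2()/back_to_P1(). EXAMPLE_CCR is a hypothetical placeholder
 * for the real cache control register address.
 */
#if 0
static void example_update_ccr(unsigned long ccr_val)
{
	jump_to_P2();	/* run uncached while touching the CCR */
	*(volatile unsigned long *)EXAMPLE_CCR = ccr_val;
	back_to_P1();	/* issues ctrl_barrier(), then back to cached P1 */
}
#endif
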
/*
 * xchg is implemented by briefly masking interrupts around a plain
 * load/store pair, so it is atomic with respect to the local CPU only.
 */
static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

#define xchg(ptr,x)	\
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))

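/*
 * Illustrative sketch (not part of the original header, compiled out):
 * xchg() atomically swaps in a new value and returns the old one, which
 * is enough to build a simple test-and-set style flag.
 */
#if 0
static volatile u32 example_taken;

static inline int example_try_lock(void)
{
	return xchg(&example_taken, 1) == 0;	/* old value 0: we won */
}

static inline void example_unlock(void)
{
	xchg(&example_taken, 0);
}
#endif
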
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
	unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

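/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the canonical cmpxchg() pattern is a read-modify-write retry loop,
 * here a lock-free increment of a shared counter.
 */
#if 0
static volatile int example_counter;

static inline void example_atomic_inc(void)
{
	int old;

	do {
		old = example_counter;
	} while (cmpxchg(&example_counter, old, old + 1) != old);
}
#endif
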
extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));

extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

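/*
 * Illustrative note (not part of the original header, compiled out):
 * SH exception event codes are spaced 0x20 apart, so evt >> 5 converts
 * an event code into a vector table index; e.g. a hypothetical event
 * code of 0x220 selects vector 0x11.
 */
#if 0
static void example_install_handler(void *handler)
{
	set_exception_table_evt(0x220, handler);	/* same as vec 0x11 */
}
#endif
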
/*
 * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
 */
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#else
#define instruction_size(insn)	(2)
#endif

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

void default_idle(void);
void per_cpu_trap_init(void);

asmlinkage void break_point_trap(void);
asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
				   unsigned long r6, unsigned long r7,
				   struct pt_regs __regs);
asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs __regs);

#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */