#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <asm/assembler.h>

#ifdef __KERNEL__
/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be task_t, but it isn't always defined.
 */
#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr					\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)
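/*
 * Usage sketch (illustrative only; in the real kernel this macro is
 * invoked from the scheduler's context_switch()): `prev' and `next'
 * are task_struct pointers, and `last' receives the task we switched
 * away from once we are eventually switched back in.
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 */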
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
}
/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
	unsigned long tmpreg;
	__asm__ __volatile__(
		"mvfc	%0, psw;		\n\t"
		"or3	%0, %0, #0x0040;	\n\t"
		"mvtc	%0, psw;		\n\t"
	: "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
	unsigned long tmpreg0, tmpreg1;
	__asm__ __volatile__(
		"ld24	%0, #0	; Use 32-bit insn.			\n\t"
		"mvfc	%1, psw	; No interrupt can be accepted here.	\n\t"
		"mvtc	%0, psw						\n\t"
		"and3	%0, %1, #0xffbf					\n\t"
		"mvtc	%0, psw						\n\t"
	: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_save_flags(x) \
	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
		: "r" (x) : "cbit", "memory")
#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x)				\
	__asm__ __volatile__(				\
		"mvfc	%0, psw;		\n\t"	\
		"clrpsw	#0x40 -> nop;		\n\t"	\
		: "=r" (x) : /* no input */ : "memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x)				\
	({						\
		unsigned long tmpreg;			\
		__asm__ __volatile__(			\
			"ld24	%1, #0 \n\t"		\
			"mvfc	%0, psw \n\t"		\
			"mvtc	%1, psw \n\t"		\
			"and3	%1, %0, #0xffbf \n\t"	\
			"mvtc	%1, psw \n\t"		\
			: "=r" (x), "=&r" (tmpreg)	\
			: : "cbit", "memory");		\
	})
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define irqs_disabled()					\
	({						\
		unsigned long flags;			\
		local_save_flags(flags);		\
		!(flags & 0x40);			\
	})
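/*
 * Usage sketch (illustrative only; `percpu_counter' is a hypothetical
 * variable): local_irq_save() records the current PSW and disables
 * local interrupts; local_irq_restore() re-enables them only if they
 * were enabled beforehand, so the pair nests safely.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	percpu_counter++;	... cannot race with a local IRQ ...
 *	local_irq_restore(flags);
 */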
#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr)	(xchg((ptr),1))
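/*
 * Usage sketch (illustrative only; `lock_word' is a hypothetical
 * variable): tas() atomically stores 1 and returns the previous
 * contents, so a zero return means this caller took the "lock".
 *
 *	static volatile int lock_word;
 *
 *	while (tas(&lock_word))
 *		;			... spin until we observed 0 ...
 *	... critical section ...
 *	xchg(&lock_word, 0);		... release ...
 */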
extern void __xchg_called_with_bad_pointer(void);
#ifdef CONFIG_CHIP_M32700_TS1
/*
 * Workaround for the M32700 TS1 chip: take a lock on dcache_dummy and
 * load from `addr' + 0x1000 and `addr' + 0x2000, so that the D-cache
 * is in a known state before the real lock/unlock sequence follows.
 */
#define DCACHE_CLEAR(reg0, reg1, addr)				\
	"seth	"reg1", #high(dcache_dummy);		\n\t"	\
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t"	\
	"lock	"reg0", @"reg1";			\n\t"	\
	"add3	"reg0", "addr", #0x1000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"add3	"reg0", "addr", #0x2000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly under SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */
static inline unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else	/* CONFIG_SMP */
	case 4:
		/* lock/unlock makes the read-modify-write atomic on SMP */
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
	default:
		__xchg_called_with_bad_pointer();
#endif	/* CONFIG_SMP */
	}

	local_irq_restore(flags);

	return (tmp);
}
#define __HAVE_ARCH_CMPXCHG	1
static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
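/*
 * Usage sketch (illustrative only; `counter' is a hypothetical
 * variable): the usual compare-and-swap retry loop.  cmpxchg()
 * returns the value actually found at the address; the store took
 * effect only if that value equals the expected old one.
 *
 *	static unsigned int counter;
 *
 *	unsigned int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */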
#endif	/* __KERNEL__ */
/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()	barrier()
#define rmb()	mb()
#define wmb()	mb()
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight
 * than rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
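/*
 * Usage sketch (illustrative only; `data' and `flag' are hypothetical
 * shared variables): the writer publishes the payload before the
 * flag, and the reader orders its two loads the same way.
 *
 *	writer				reader
 *
 *	data = 42;			while (!flag)
 *	smp_wmb();				;
 *	flag = 1;			smp_rmb();
 *					use(data);
 */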
#define arch_align_stack(x) (x)

#endif	/* _ASM_M32R_SYSTEM_H */