#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always defined
 */

#if defined(CONFIG_FRAME_POINTER) || \
	!defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP "	push fp\n"
#define M32R_POP_FP  "	pop  fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP  ""
#endif

#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
			M32R_PUSH_FP \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr					\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
			M32R_POP_FP \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr, x)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg_local(ptr, x)						\
	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr),	\
			sizeof(*(ptr))))
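
/*
 * Illustrative only (not part of this header): a minimal, hedged sketch of
 * using xchg() as a simple test-and-set spin loop.  The helper name is an
 * assumption made up for this example.
 */
#if 0
static inline void example_busy_lock(volatile unsigned long *lock)
{
	/* Atomically store 1 and observe the previous value; spin while
	 * some other CPU already holds the lock. */
	while (xchg(lock, 1UL) != 0UL)
		cpu_relax();
}
#endif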

extern void  __xchg_called_with_bad_pointer(void);

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr)				\
	"seth	"reg1", #high(dcache_dummy);		\n\t"	\
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t"	\
	"lock	"reg0", @"reg1";			\n\t"	\
	"add3	"reg0", "addr", #0x1000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"add3	"reg0", "addr", #0x2000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly under SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */

static __always_inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
#endif  /* CONFIG_SMP */
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

static __always_inline unsigned long
__xchg_local(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

static inline unsigned long
__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
			unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			"ld %0, @%1;		\n"
		"	bne	%0, %2, 1f;	\n"
			"st %3, @%1;		\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			"st %0, @%1;		\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
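
/*
 * Illustrative only (not part of this header): a hedged sketch of the usual
 * compare-and-swap retry loop built on cmpxchg().  The helper name is an
 * assumption made up for this example.
 */
#if 0
static inline unsigned int example_atomic_inc(unsigned int *counter)
{
	unsigned int old, new;

	do {
		old = *counter;
		new = old + 1;
		/* Retry if another CPU updated *counter since we read it. */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}
#endif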

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_local_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif  /* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()   barrier()
#define rmb()  mb()
#define wmb()  mb()

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
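
/*
 * Illustrative only (not part of this header): a hedged sketch of the usual
 * smp_wmb()/smp_rmb() pairing used to publish data through a flag.  The
 * variable and helper names are assumptions made up for this example.
 */
#if 0
static int example_data;
static int example_ready;

static void example_publish(int value)
{
	example_data = value;
	smp_wmb();		/* order the data store before the flag store */
	example_ready = 1;
}

static int example_consume(int *value)
{
	if (!example_ready)
		return 0;
	smp_rmb();		/* order the flag load before the data load */
	*value = example_data;
	return 1;
}
#endif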

#define arch_align_stack(x) (x)

#endif	/* _ASM_M32R_SYSTEM_H */