/*
 * Copyright (c) 2005 blackfin.uclinux.org
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#ifndef _BLACKFIN_SYSTEM_H
#define _BLACKFIN_SYSTEM_H

#include <linux/config.h>	/* get configuration macros */
#include <asm/linkage.h>
#include <asm/blackfin.h>
#include <asm/segment.h>
#include <asm/entry.h>

#define prepare_to_switch()	do { } while (0)

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to was the last one
 * to use the math co-processor.
 *
 * 05/25/01 - Tony Kou (tonyko@lineo.ca)
 *
 * Adapted for BlackFin (ADI) by Ted Ma, Metrowerks, and Motorola GSG
 * Copyright (c) 2002 Arcturus Networks Inc. (www.arcturusnetworks.com)
 * Copyright (c) 2003 Metrowerks (www.metrowerks.com)
 */

asmlinkage void resume(void);

#define switch_to(prev,next,last) { \
	void *_last; \
	__asm__ __volatile__( \
		"r0 = %1;\n\t" \
		"r1 = %2;\n\t" \
		"call resume;\n\t" \
		"%0 = r0;\n\t" \
		: "=d" (_last) \
		: "d" (prev), "d" (next) \
		: "CC", "R0", "R1", "R2", "R3", "R4", "R5", "P0", "P1");\
	(last) = _last; \
}

/* Force kernel switch to user mode -- Steven Chen */
#define switch_to_user_mode() { \
	__asm__ __volatile__( \
		"call kernel_to_user_mode;\n\t" \
		: : \
		: "CC", "R0", "R1", "R2", "R3", "R4", "R5", "P0", "P1");\
}

/*
 * Interrupt configuring macros.
 */

extern int irq_flags;

#define __sti() { \
	__asm__ __volatile__ ( \
		"r3 = %0;" \
		"sti r3;" \
		::"m"(irq_flags):"R3"); \
}

#define __cli() { \
	__asm__ __volatile__ ( \
		"cli r3;" \
		:::"R3"); \
}

#define __save_flags(x) { \
	__asm__ __volatile__ ( \
		"cli r3;" \
		"%0 = r3;" \
		"sti r3;" \
		: "=m"(x) : : "R3"); \
}

#define __save_and_cli(x) { \
	__asm__ __volatile__ ( \
		"cli r3;" \
		"%0 = r3;" \
		: "=m"(x) : : "R3"); \
}

#define __restore_flags(x) { \
	__asm__ __volatile__ ( \
		"r3 = %0;" \
		"sti r3;" \
		::"m"(x):"R3"); \
}

/* For spinlocks etc */
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)
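
/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * a short critical section guarded by the local_irq_* wrappers above.
 * "counter" is a made-up example of data shared with an interrupt
 * handler.
 */
static inline void __irq_save_example(volatile int *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask interrupts, remember old state */
	(*counter)++;			/* touch data an IRQ handler also uses */
	local_irq_restore(flags);	/* put the saved interrupt state back */
}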

/*
 * Force strict CPU ordering.
 */
#define nop()			asm volatile ("nop;\n\t"::)
#define mb()			asm volatile ("" : : :"memory")
#define rmb()			asm volatile ("" : : :"memory")
#define wmb()			asm volatile ("" : : :"memory")
#define set_rmb(var, value)	do { xchg(&var, value); } while (0)
#define set_mb(var, value)	set_rmb(var, value)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif
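
/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * publishing a value behind a flag with the ordering macros above.
 * "data" and "ready" are made-up example variables.
 */
static inline void __wmb_example(volatile int *data, volatile int *ready)
{
	*data = 42;	/* fill in the payload first */
	wmb();		/* keep the payload write ahead of the flag write */
	*ready = 1;	/* then let readers see the flag */
}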

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	unsigned long tmp = 0;
	unsigned long flags = 0;

	/* No atomic exchange instruction is used here: interrupts are
	 * masked around the load/store pair instead. */
	save_and_cli(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__("%0 = %2;\n\t"
				     "%2 = %1;\n\t"
				     : "=&d"(tmp)
				     : "d"(x), "m"(*__xg(ptr))
				     : "memory");
		break;
	case 2:
		__asm__ __volatile__("%0 = %2;\n\t"
				     "%2 = %1;\n\t"
				     : "=&d"(tmp)
				     : "d"(x), "m"(*__xg(ptr))
				     : "memory");
		break;
	case 4:
		__asm__ __volatile__("%0 = %2;\n\t"
				     "%2 = %1;\n\t"
				     : "=&d"(tmp)
				     : "d"(x), "m"(*__xg(ptr))
				     : "memory");
		break;
	}

	restore_flags(flags);
	return tmp;
}
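
/*
 * Illustrative sketch (assumed usage, not part of the original header):
 * tas() as a simple test-and-set lock on a made-up lock word.  On this
 * port the exchange is made "atomic" by masking interrupts in __xchg()
 * above, so it only protects against local interrupt handlers.
 */
static inline void __tas_example(volatile unsigned long *lock)
{
	while (tas(lock))
		;		/* spin until the previous value was 0 */
	/* ... critical section ... */
	*lock = 0;		/* release: plain store clears the lock word */
}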

/* Depends on whether the Blackfin has a hard reset function */
/* YES it does, but it is tricky to implement - FIXME later ...MaTed--- */
#define HARD_RESET_NOW() ({})

#endif	/* _BLACKFIN_SYSTEM_H */