/* arch/blackfin/kernel/fixed_code.S */
/*
 * This file contains sequences of code that will be copied to a
 * fixed location, defined in <asm/fixed_code.h>.  The interrupt
 * handlers ensure that these sequences appear to be atomic when
 * executed from userspace.
 * These are aligned to 16 bytes, so that we have some space to replace
 * these sequences with something else (e.g. kernel traps if we ever do
 * BF561 SMP).
 */
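/*
 * Informational note (an assumption drawn from the description above,
 * not something this file defines): at boot the kernel copies each
 * sequence below to its fixed absolute address, and <asm/fixed_code.h>
 * publishes those addresses (ATOMIC_XCHG32, ATOMIC_CAS32, and so on) so
 * that userspace can call the copies directly and rely on the interrupt
 * handlers for their atomicity.
 */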
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/entry.h>

.text
ENTRY(_fixed_code_start)

.align 16
ENTRY(_sigreturn_stub)
        P0 = __NR_rt_sigreturn;
        EXCPT 0;
        /* Speculative execution paranoia.  */
0:      JUMP.S 0b;
ENDPROC (_sigreturn_stub)
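        /*
         * Usage note (an assumption about the surrounding signal code,
         * not something this file enforces): the kernel is expected to
         * point a signal frame's return address at the userspace copy of
         * this stub, so a handler that simply returns ends up issuing
         * rt_sigreturn without needing an on-stack trampoline.
         */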
.align 16
        /*
         * Atomic exchange, 32 bit.
         * Inputs:      P0: memory address to use
         *              R1: value to store
         * Output:      R0: old contents of the memory address.
         */
ENTRY(_atomic_xchg32)
        R0 = [P0];
        [P0] = R1;
        rts;
ENDPROC (_atomic_xchg32)
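        /*
         * Usage sketch (commented out, never assembled; _my_lock and the
         * ATOMIC_XCHG32 constant assumed to come from <asm/fixed_code.h>
         * are illustrative only): a minimal test-and-set spinlock built
         * on this exchange, called from userspace at its fixed address.
         *
         *      P0.L = lo(_my_lock);      P0.H = hi(_my_lock);
         *      P1.L = lo(ATOMIC_XCHG32); P1.H = hi(ATOMIC_XCHG32);
         *      R1 = 1;                   value meaning "locked"
         * 1:   CALL (P1);                R0 = previous lock value
         *      CC = R0 == 0;             was the lock free?
         *      IF !CC JUMP 1b;           no - retry until it was
         */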
.align 16
        /*
         * Compare and swap, 32 bit.
         * Inputs:      P0: memory address to use
         *              R1: compare value
         *              R2: new value to store
         * The new value is stored if the contents of the memory
         * address are equal to the compare value.
         * Output:      R0: old contents of the memory address.
         */
ENTRY(_atomic_cas32)
        R0 = [P0];
        CC = R0 == R1;
        IF !CC JUMP 1f;
        [P0] = R2;
1:
        rts;
ENDPROC (_atomic_cas32)
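        /*
         * Usage sketch (commented out, never assembled; ATOMIC_CAS32 is
         * the assumed constant from <asm/fixed_code.h>): the usual read,
         * compute, compare-and-swap retry loop, here atomically doubling
         * the word whose address is already in P0.
         *
         *      P1.L = lo(ATOMIC_CAS32);  P1.H = hi(ATOMIC_CAS32);
         * 1:   R1 = [P0];                compare value = current contents
         *      R2 = R1 << 1;             new value to store
         *      CALL (P1);                R0 = value the CAS actually saw
         *      CC = R0 == R1;            did it match our snapshot?
         *      IF !CC JUMP 1b;           no - another thread raced us, retry
         */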
.align 16
        /*
         * Atomic add, 32 bit.
         * Inputs:      P0: memory address to use
         *              R0: value to add
         * Outputs:     R0: new contents of the memory address.
         *              R1: previous contents of the memory address.
         */
ENTRY(_atomic_add32)
        R1 = [P0];
        R0 = R1 + R0;
        [P0] = R0;
        rts;
ENDPROC (_atomic_add32)
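        /*
         * Note: _atomic_add32 above and the four read-modify-write
         * helpers below (_atomic_sub32, _atomic_ior32, _atomic_and32,
         * _atomic_xor32) share one calling convention: P0 holds the
         * address, R0 carries the operand in and the new contents out,
         * R1 returns the previous contents, and nothing else is modified.
         */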
.align 16
        /*
         * Atomic sub, 32 bit.
         * Inputs:      P0: memory address to use
         *              R0: value to subtract
         * Outputs:     R0: new contents of the memory address.
         *              R1: previous contents of the memory address.
         */
ENTRY(_atomic_sub32)
        R1 = [P0];
        R0 = R1 - R0;
        [P0] = R0;
        rts;
ENDPROC (_atomic_sub32)
.align 16
        /*
         * Atomic ior, 32 bit.
         * Inputs:      P0: memory address to use
         *              R0: value to ior
         * Outputs:     R0: new contents of the memory address.
         *              R1: previous contents of the memory address.
         */
ENTRY(_atomic_ior32)
        R1 = [P0];
        R0 = R1 | R0;
        [P0] = R0;
        rts;
ENDPROC (_atomic_ior32)
.align 16
        /*
         * Atomic and, 32 bit.
         * Inputs:      P0: memory address to use
         *              R0: value to and
         * Outputs:     R0: new contents of the memory address.
         *              R1: previous contents of the memory address.
         */
ENTRY(_atomic_and32)
        R1 = [P0];
        R0 = R1 & R0;
        [P0] = R0;
        rts;
ENDPROC (_atomic_and32)
.align 16
        /*
         * Atomic xor, 32 bit.
         * Inputs:      P0: memory address to use
         *              R0: value to xor
         * Outputs:     R0: new contents of the memory address.
         *              R1: previous contents of the memory address.
         */
ENTRY(_atomic_xor32)
        R1 = [P0];
        R0 = R1 ^ R0;
        [P0] = R0;
        rts;
ENDPROC (_atomic_xor32)
.align 16
        /*
         * safe_user_instruction
         * Four NOPs are enough to let the pipeline speculatively load and
         * execute anything it wants.  After that, things have gone bad and
         * we are stuck - so panic.  Since we might be in user space, we
         * can't call panic, so instead raise an unhandled exception; this
         * should dump the trace buffer so we can tell where we were, and
         * then reboot.
         */
ENTRY(_safe_user_instruction)
        NOP; NOP; NOP; NOP;
        EXCPT 0x4;
ENDPROC(_safe_user_instruction)

ENTRY(_fixed_code_end)