arch/mips/include/asm/system.h [zen-stable.git]

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/watch.h>
#include <asm/war.h>

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)					\
do {									\
	struct thread_info *__prev_ti = task_thread_info(prev);	\
									\
	if (cpu_has_fpu &&						\
	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	next->thread.emulated_fp = 0;					\
} while(0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define __clear_software_ll_bit()					\
do {									\
	if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)	\
		ll_bit = 0;						\
} while (0)

#define switch_to(prev, next, last)					\
do {									\
	__mips_mt_fpaff_switch_to(prev);				\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	__clear_software_ll_bit();					\
	(last) = resume(prev, next, task_thread_info(next));		\
} while (0)
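
/*
 * Usage sketch (illustrative, not part of the original header): the generic
 * scheduler's context_switch() path invokes the macro roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * resume() switches kernel stacks, so execution continues on next's stack;
 * the task we actually switched away from is handed back through the third
 * argument ("last") so the new thread knows which task it still has to
 * finish cleaning up after.
 */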

#define finish_arch_switch(prev)					\
do {									\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
	if (cpu_has_userlocal)						\
		write_c0_userlocal(current_thread_info()->tp_value);	\
	__restore_watch();						\
} while (0)
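
/*
 * The __xchg_u32()/__xchg_u64() helpers below atomically store a new value
 * into *m and return the previous contents.  Three code paths exist:
 *
 *  - R10000_LLSC_WAR: an ll/sc sequence using the branch-likely form
 *    ("beqzl") to work around the R10000 ll/sc errata;
 *  - plain ll/sc: a load-linked/store-conditional retry loop that repeats
 *    until sc succeeds (sc leaves 1 in "dummy" on success, 0 on failure);
 *  - fallback: on CPUs without ll/sc, interrupts are disabled around a
 *    plain load and store (historically such systems are uniprocessor,
 *    so this is sufficient).
 *
 * smp_mb__before_llsc() and smp_llsc_mb() give the exchange full memory
 * barrier semantics on SMP configurations.
 */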

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	ll	%0, %3		# xchg_u32	\n"
			"	.set	mips0				\n"
			"	move	%2, %z4				\n"
			"	.set	mips3				\n"
			"	sc	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long dummy;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	lld	%0, %3		# xchg_u64	\n"
			"	move	%2, %z4				\n"
			"	scd	%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (retval), "=m" (*m), "=&r" (dummy)
			: "R" (*m), "Jr" (val)
			: "memory");
		} while (unlikely(!dummy));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		*m = val;
		raw_local_irq_restore(flags);	/* implies memory barrier  */
	}

	smp_llsc_mb();

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif
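
/*
 * Note that __xchg_u64_unsupported_on_32bit_kernels() is declared but never
 * defined, so an 8-byte xchg() on a 32-bit kernel shows up as an undefined
 * reference at link time instead of miscompiling silently.
 */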

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}

	return x;
}

#define xchg(ptr, x)							\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);				\
									\
	((__typeof__(*(ptr)))						\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));	\
})
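
/*
 * Usage sketch (illustrative only, not part of the original header): xchg()
 * atomically stores the new value and returns the old one, and the
 * BUILD_BUG_ON() above restricts the operand to 4- or 8-byte objects.
 * A minimal test-and-set style trylock could be built on top of it; the
 * helper name below is made up for illustration:
 */
static inline int __xchg_example_trylock(volatile unsigned int *word)
{
	/* xchg() returns the old value: 0 means the lock was free and is now ours. */
	return xchg(word, 1) == 0;
}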

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);

typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);

extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* _ASM_SYSTEM_H */