/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#ifdef __ASSEMBLY__

	.macro	_ssnop
	sll	$0, $0, 1
	.endm

	.macro	_ehb
	sll	$0, $0, 3
	.endm
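
/*
 * Note: "sll $0, $0, 1" is the architected encoding of the MIPS32/MIPS64
 * ssnop instruction and "sll $0, $0, 3" is the encoding of ehb.  Spelling
 * them out as shifts of $0 keeps the macros acceptable to assemblers that
 * do not know the mnemonics, and both degrade to plain nops on CPUs that
 * predate these instructions.
 */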

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

	.macro	mtc0_tlbw_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

	.macro	tlbw_eret_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

#else

/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
	.macro	mtc0_tlbw_hazard
	b	. + 8
	.endm

	.macro	tlbw_eret_hazard
	.endm
#endif
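
/*
 * Illustrative sketch, not part of the original header: an assembly TLB
 * write sequence would typically place these barriers between the CP0
 * writes, the TLB write and the eret, roughly
 *
 *	mtc0	k0, CP0_ENTRYLO0
 *	mtc0	k1, CP0_ENTRYLO1
 *	mtc0_tlbw_hazard
 *	tlbwr
 *	tlbw_eret_hazard
 *	eret
 *
 * The register choice is an assumption for the example; the real refill
 * handlers are built by arch/mips/mm/tlbex.c.
 */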

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */

#define irq_enable_hazard						\
	_ehb

#define irq_disable_hazard						\
	_ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

#define irq_enable_hazard

#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard
#define irq_disable_hazard						\
	_ssnop; _ssnop; _ssnop

#endif
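
/*
 * Illustrative sketch, not part of the original header: assembly code that
 * masks interrupts in the Status register follows the mtc0 with the barrier
 * before relying on the new state, e.g.
 *
 *	mfc0	t0, CP0_STATUS
 *	ori	t0, t0, 1
 *	xori	t0, t0, 1
 *	mtc0	t0, CP0_STATUS
 *	irq_disable_hazard
 *
 * The temporary register is an assumption for the example.
 */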

#else /* __ASSEMBLY__ */

__asm__(
	"	.macro	_ssnop					\n"
	"	sll	$0, $0, 1				\n"
	"	.endm						\n"
	"							\n"
	"	.macro	_ehb					\n"
	"	sll	$0, $0, 3				\n"
	"	.endm						\n");

#ifdef CONFIG_CPU_RM9000

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */

#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
	"	.set	mips32					\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	.set	mips0					\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
	"	.set	mips32					\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	.set	mips0					\n")

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
	"	.set	noreorder				\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	.set	reorder					\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
	"	.set	noreorder				\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	.set	reorder					\n")

#endif
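
/*
 * Illustrative sketch, not part of the original header: C code rewriting a
 * TLB entry with the asm/mipsregs.h accessors would use the barriers
 * roughly as
 *
 *	write_c0_entryhi(newhi);
 *	write_c0_entrylo0(newlo0);
 *	write_c0_entrylo1(newlo1);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 *
 * i.e. one barrier between the CP0 writes and the TLB write, and one before
 * anything the new entry translates is touched.  The variable names are
 * assumptions; see arch/mips/mm/tlb-r4k.c for real callers.
 */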

/*
 * Interrupt enable/disable hazards
 * Some processors have hazards when modifying
 * the status register to change the interrupt state
 */

#ifdef CONFIG_CPU_MIPSR2

__asm__("	.macro	irq_enable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */

__asm__(
	"	.macro	irq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	.endm						\n");

#else

/*
 * Default for classic MIPS processors.  Assume worst case hazards but don't
 * care about the irq_enable_hazard - sooner or later the hardware will
 * enable it and we don't care when exactly.
 */

__asm__(
	"	#						\n"
	"	# There is a hazard but we do not care		\n"
	"	#						\n"
	"	.macro\tirq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro\tirq_disable_hazard			\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.endm						\n");

#endif

#define irq_enable_hazard()						\
	__asm__ __volatile__("irq_enable_hazard")
#define irq_disable_hazard()						\
	__asm__ __volatile__("irq_disable_hazard")
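
/*
 * Illustrative sketch, not part of the original header: C code that clears
 * the interrupt enable bit in the Status register may only rely on
 * interrupts being masked after the barrier, roughly
 *
 *	write_c0_status(read_c0_status() & ~ST0_IE);
 *	irq_disable_hazard();
 *
 * The real users are the local_irq_*() implementations in the MIPS headers,
 * which emit these macros from inline assembly.
 */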

/*
 * Back-to-back hazards -
 *
 * What is needed to separate a move to cp0 from a subsequent read from the
 * same cp0 register?
 */
#ifdef CONFIG_CPU_MIPSR2

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
      defined(CONFIG_CPU_SB1)

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	.endm						\n");

#else

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	.set	noreorder				\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.set	reorder					\n"
	"	.endm");

#endif

#define back_to_back_c0_hazard()					\
	__asm__ __volatile__("back_to_back_c0_hazard")
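
/*
 * Illustrative sketch, not part of the original header: probing whether a
 * Status bit sticks requires the barrier between the write and the
 * read-back of the same register, roughly
 *
 *	write_c0_status(read_c0_status() | ST0_MX);
 *	back_to_back_c0_hazard();
 *	mx_supported = read_c0_status() & ST0_MX;
 *
 * ST0_MX is only an example bit; any write followed by a read of the same
 * CP0 register needs the same treatment.
 */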

/*
 * Instruction execution hazard
 */
#ifdef CONFIG_CPU_MIPSR2
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#else
#define instruction_hazard() do { } while (0)
#endif
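
/*
 * Illustrative sketch, not part of the original header: code that has just
 * written or patched instructions calls the barrier after the cache
 * maintenance and before executing the new code, roughly
 *
 *	memcpy(dst, new_insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *	instruction_hazard();
 *	((void (*)(void))dst)();
 *
 * flush_icache_range() stands in for whatever cache maintenance the caller
 * already does; instruction_hazard() only clears the execution hazard so
 * that the following fetches see the new instructions.
 */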

extern void mips_ihb(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */