/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/config.h>

#ifdef __ASSEMBLY__

	.macro	_ssnop
	sll	$0, $0, 1
	.endm

	.macro	_ehb
	sll	$0, $0, 3
	.endm
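
/*
 * SSNOP is encoded as sll $0, $0, 1 and EHB as sll $0, $0, 3; both sit
 * in the no-op encoding space, so they execute as ordinary no-ops on
 * processors that do not implement them.
 */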

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

	.macro	mtc0_tlbw_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

	.macro	tlbw_eret_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

#else

/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single
 * cycle hazard, so this is a nice trick to get optimal code for a range
 * of processors.
 */
	.macro	mtc0_tlbw_hazard
	b	. + 8
	.endm

	.macro	tlbw_eret_hazard
	.endm
#endif
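
/*
 * Illustrative use in a .S file (a hypothetical sequence, not part of
 * this header): the barriers sit between the TLB write and anything that
 * depends on it:
 *
 *	mtc0	k1, CP0_ENTRYLO0
 *	mtc0_tlbw_hazard
 *	tlbwr
 *	tlbw_eret_hazard
 *	eret
 */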

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */

#define irq_enable_hazard						\
	_ehb

#define irq_disable_hazard						\
	_ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards are handled in hardware, so this becomes a
 * no-brainer.
 */

#define irq_enable_hazard

#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard
#define irq_disable_hazard						\
	_ssnop; _ssnop; _ssnop

#endif
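
/*
 * Illustrative assembler use (hypothetical, in the style of the usual
 * interrupt-disable sequence): the barrier must follow the c0_status
 * write before anything relies on interrupts being masked:
 *
 *	mfc0	t0, CP0_STATUS
 *	ori	t0, 0x1
 *	xori	t0, 0x1
 *	mtc0	t0, CP0_STATUS
 *	irq_disable_hazard
 */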

#else /* __ASSEMBLY__ */

__asm__(
	"	.macro	_ssnop					\n"
	"	sll	$0, $0, 1				\n"
	"	.endm						\n"
	"							\n"
	"	.macro	_ehb					\n"
	"	sll	$0, $0, 3				\n"
	"	.endm						\n");

#ifdef CONFIG_CPU_RM9000

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */

#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
	"	.set	mips32					\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	.set	mips0					\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
	"	.set	mips32					\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	_ssnop						\n"	\
	"	.set	mips0					\n")

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
	"	.set	noreorder				\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	.set	reorder					\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
	"	.set	noreorder				\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	nop						\n"	\
	"	.set	reorder					\n")

#endif
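
/*
 * Typical C-level caller (a hypothetical sketch; write_c0_entryhi() and
 * tlb_write_indexed() are the usual <asm/mipsregs.h> accessors, not part
 * of this header):
 *
 *	write_c0_entryhi(entryhi);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 */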

/*
 * Interrupt enable/disable hazards
 * Some processors have hazards when modifying the status register to
 * change the interrupt state.
 */

#ifdef CONFIG_CPU_MIPSR2

__asm__("	.macro	irq_enable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards are handled in hardware, so this becomes a
 * no-brainer.
 */

__asm__(
	"	.macro	irq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	.endm						\n");

#else

/*
 * Default for classic MIPS processors.  Assume worst case hazards but
 * don't care about the irq_enable_hazard - sooner or later the hardware
 * will enable interrupts and we don't care when exactly.
 */

__asm__(
	"	#						\n"
	"	# There is a hazard but we do not care		\n"
	"	#						\n"
	"	.macro\tirq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro\tirq_disable_hazard			\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.endm						\n");

#endif

#define irq_enable_hazard()						\
	__asm__ __volatile__("irq_enable_hazard")
#define irq_disable_hazard()						\
	__asm__ __volatile__("irq_disable_hazard")
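
/*
 * Illustrative C caller (hypothetical): a raw interrupt-disable helper
 * would place the barrier right after clearing IE in c0_status:
 *
 *	write_c0_status(read_c0_status() & ~ST0_IE);
 *	irq_disable_hazard();
 */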

/*
 * Back-to-back hazards -
 *
 * What is needed to separate a move to cp0 from a subsequent read from the
 * same cp0 register?
 */

#ifdef CONFIG_CPU_MIPSR2

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
      defined(CONFIG_CPU_SB1)

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	.endm						\n");

#else

__asm__("	.macro	back_to_back_c0_hazard			\n"
	"	.set	noreorder				\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.set	reorder					\n"
	"	.endm						\n");

#endif

#define back_to_back_c0_hazard()					\
	__asm__ __volatile__("back_to_back_c0_hazard")
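
/*
 * Example (hypothetical, in the style of the timer setup code): make
 * sure a c0_compare write has taken effect before reading it back:
 *
 *	write_c0_compare(cnt);
 *	back_to_back_c0_hazard();
 *	if (read_c0_compare() != cnt)
 *		panic("compare register did not update");
 */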

/*
 * Instruction execution hazard
 */
#ifdef CONFIG_CPU_MIPSR2
/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the
 * other hand, has the annoying difference between la and dla, which are
 * only usable for 32-bit and 64-bit code respectively, so neither can be
 * used without conditional compilation.  The alternative is switching
 * the assembler to 64-bit code, which happens to work right even for
 * 32-bit code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#else
#define instruction_hazard() do { } while (0)
#endif
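
/*
 * Illustrative use (hypothetical): after patching kernel text and
 * flushing the caches, run the barrier before executing the new code;
 * jr.hb clears instruction hazards on MIPS32/64 R2:
 *
 *	memcpy(dst, new_insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *	instruction_hazard();
 */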

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */