arch/mips/include/asm/irqflags.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/hazards.h>

#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
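
/*
 * The helpers below are implemented as assembler macros, emitted once at
 * file scope via bare __asm__() blocks, so that each C wrapper can expand
 * to a single macro invocation while still picking up the interrupt
 * hazard barriers provided by <asm/hazards.h>.
 */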
__asm__(
	"	.macro	arch_local_irq_disable			\n"
	"	.set	push					\n"
	"	.set	noat					\n"
	"	di						\n"
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");
static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__(
		"arch_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

__asm__(
	"	.macro	arch_local_irq_save result		\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
	"	di	\\result				\n"
	"	andi	\\result, 1				\n"
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;
	asm volatile("arch_local_irq_save\t%0"
		     : "=r" (flags)
		     : /* no inputs */
		     : "memory");
	return flags;
}
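
/*
 * Illustrative pairing (not part of this file): generic code normally
 * reaches these helpers through the local_irq_save()/local_irq_restore()
 * wrappers, which boil down to
 *
 *	unsigned long flags = arch_local_irq_save();
 *	... critical section, interrupts masked on this CPU ...
 *	arch_local_irq_restore(flags);
 */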

__asm__(
	"	.macro	arch_local_irq_restore flags		\n"
	"	.set	push					\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
#if defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we're having since days 1.
	 */
	"	beqz	\\flags, 1f				\n"
	"	di						\n"
	"	ei						\n"
	"1:							\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12					\n"
	"	ins	$1, \\flags, 0, 1			\n"
	"	mtc0	$1, $12					\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");
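
/*
 * Note on the two wrappers below: flags is fed in through a dummy output
 * operand ("=r" (__tmp1) tied to "0" (flags)), so the compiler places the
 * value in a register for the .macro to read and does not assume that
 * register still holds flags once the asm has run.
 */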
static inline void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}

static inline void __arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */

__asm__(
	"	.macro	arch_local_irq_enable			\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400				\n"
	"	xori	$1, 0x400				\n"
	"	mtc0	$1, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei						\n"
#else
	"	mfc0	$1,$12					\n"
	"	ori	$1,0x1f					\n"
	"	xori	$1,0x1e					\n"
	"	mtc0	$1,$12					\n"
#endif
	"	irq_enable_hazard				\n"
	"	.set	pop					\n"
	"	.endm");

extern void smtc_ipi_replay(void);

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of call overhead on each local_irq_enable().
	 */
	smtc_ipi_replay();
#endif
	__asm__ __volatile__(
		"arch_local_irq_enable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

__asm__(
	"	.macro	arch_local_save_flags flags		\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\flags, $2, 1				\n"
#else
	"	mfc0	\\flags, $12				\n"
#endif
	"	.set	pop					\n"
	"	.endm						\n");
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;
	asm volatile("arch_local_save_flags %0" : "=r" (flags));
	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
	 */
	return flags & 0x400;
#else
	return !(flags & 1);
#endif
}
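
/*
 * Illustrative only: the two helpers above are meant to be combined to
 * test the current CPU's interrupt state, roughly
 *
 *	if (arch_irqs_disabled_flags(arch_local_save_flags()))
 *		... interrupts are masked on this CPU ...
 */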

#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$11, PT_R11(sp);				\
	LONG_L	$10, PT_R10(sp);				\
	LONG_L	$9, PT_R9(sp);					\
	LONG_L	$8, PT_R8(sp);					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON						\
	CLI;	/* make sure trace_hardirqs_on() is called in kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD					\
	TRACE_IRQS_ON;						\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF						\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
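
/*
 * Sketch of typical use from assembly entry code (the register-save macro
 * name is illustrative, not defined in this header):
 *
 *	SAVE_ALL		# spill registers into the pt_regs frame
 *	TRACE_IRQS_OFF		# record that interrupts are now off
 *	...
 *	TRACE_IRQS_ON_RELOAD	# record irqs-on, then restore the argument
 *				# registers trace_hardirqs_on() may clobber
 */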

#endif /* _ASM_IRQFLAGS_H */