Linux v2.6.13-rc3
kernel/spinlock.c
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004) Ingo Molnar
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 */
int __lockfunc generic_raw_read_trylock(rwlock_t *lock)
{
	_raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic_raw_read_trylock);
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);
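
/*
 * Usage sketch (added commentary, not part of the original file): callers
 * normally go through the spin_trylock()/read_trylock()/write_trylock()
 * wrappers in <linux/spinlock.h>, which on SMP kernels end up in the
 * functions above.  The acquisition either succeeds immediately or fails
 * without spinning.  my_lock and my_stat are hypothetical names:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	static unsigned long my_stat;
 *
 *	if (spin_trylock(&my_lock)) {
 *		my_stat++;			// lock held: do the quick work
 *		spin_unlock(&my_lock);
 *	}
 */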
#ifndef CONFIG_PREEMPT

void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
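
/*
 * Usage sketch (added commentary): the spin_lock_irqsave() wrapper in
 * <linux/spinlock.h> passes the returned flags back to the caller, which
 * later hands them to spin_unlock_irqrestore().  my_lock and my_count are
 * hypothetical names:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);	// local IRQs off, state saved
 *	my_count++;				// safe against local interrupts
 *	spin_unlock_irqrestore(&my_lock, flags);
 */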
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
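
/*
 * Usage sketch (added commentary): the _bh variants protect data that is
 * also touched from softirq context (timers, tasklets, network RX), by
 * disabling softirq processing on the local CPU while the lock is held.
 * Hypothetical names again:
 *
 *	spin_lock_bh(&my_lock);		// softirqs blocked on this CPU
 *	my_pending++;			// also updated from a tasklet
 *	spin_unlock_bh(&my_lock);
 */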
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);
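
/*
 * Usage sketch (added commentary): the rwlock entry points above sit behind
 * the read_lock()/write_lock() wrappers in <linux/spinlock.h>.  Any number
 * of readers may hold the lock at once; a writer gets exclusive access.
 * my_rwlock and my_value are hypothetical:
 *
 *	static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *	read_lock(&my_rwlock);		// concurrent readers are fine
 *	val = my_value;
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);		// exclusive
 *	my_value = val + 1;
 *	write_unlock(&my_rwlock);
 */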
#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	preempt_disable();						\
	for (;;) {							\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
		preempt_disable();					\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	preempt_disable();						\
	for (;;) {							\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
									\
		preempt_enable();					\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
		preempt_disable();					\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
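
/*
 * For reference (added commentary): expanding BUILD_LOCK_OPS(spin, spinlock)
 * by hand yields roughly the following _spin_lock().  The lock is acquired
 * with trylock attempts so that the actual busy-waiting happens with
 * preemption enabled, while break_lock asks the current holder to drop the
 * lock early:
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		for (;;) {
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!spin_can_lock(lock) && (lock)->break_lock)
 *				cpu_relax();
 *			preempt_disable();
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */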

#endif /* CONFIG_PREEMPT */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);

void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
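
/*
 * Note on the *_unlock_bh() ordering above (added commentary): preemption is
 * re-enabled with preempt_enable_no_resched() because the subsequent
 * local_bh_enable() is expected to run any pending softirqs and perform its
 * own final preemption check, so rescheduling before it would be redundant.
 */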
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable_no_resched();
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
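
/*
 * Usage sketch (added commentary): profiling code uses this helper to avoid
 * attributing samples to the lock primitives themselves.  Roughly what an
 * architecture's profile_pc() does when a sampled program counter lands
 * inside the __lockfunc text section:
 *
 *	unsigned long pc = instruction_pointer(regs);
 *
 *	if (in_lock_functions(pc))
 *		pc = ...;	// use the caller's return address instead
 *	return pc;
 */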