#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
 * since it only has load-and-zero.  Moreover, at least on some PA processors,
 * the semaphore address has to be 16-byte aligned.
 */
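/* For illustration only, a rough sketch of the primitives this header
 * relies on (the real definitions live in <asm/system.h>):
 *
 *	__ldcw(a)	uses the ldcw instruction to atomically load *a
 *			and store 0 to it, returning the old value -- so
 *			1 means we just took the lock, 0 means it was
 *			already held;
 *	__ldcw_align(x)	returns a 16-byte aligned lock word inside *x
 *			for ldcw to operate on.
 */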
#ifndef CONFIG_DEBUG_SPINLOCK

#define __SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
static inline int spin_is_locked(spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;		/* 0 means locked on PA-RISC, see above */
}
#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))

#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
static inline void _raw_spin_lock(spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	/* spin on a plain load; only retry ldcw once the word goes non-zero */
	while (__ldcw(a) == 0)
		while (*a == 0);
	mb();
}
static inline void _raw_spin_unlock(spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	*a = 1;		/* 1 means unlocked */
	mb();
}
static inline int _raw_spin_trylock(spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = (__ldcw(a) != 0);
	mb();

	return ret;
}
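/* A minimal usage sketch (illustrative only; "mylock" is hypothetical,
 * and callers normally use the generic spin_lock()/spin_unlock() wrappers
 * rather than these _raw_* entry points):
 *
 *	static spinlock_t mylock = SPIN_LOCK_UNLOCKED;
 *
 *	if (_raw_spin_trylock(&mylock)) {
 *		... critical section ...
 *		_raw_spin_unlock(&mylock);
 *	}
 */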
#define spin_lock_own(LOCK, LOCATION)	((void)0)

#else /* !(CONFIG_DEBUG_SPINLOCK) */
#define SPINLOCK_MAGIC	0x1D244B3C

#define __SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define CHECK_LOCK(x)							\
	do {								\
		if (unlikely((x)->magic != SPINLOCK_MAGIC)) {		\
			printk(KERN_ERR "%s:%d: spin_is_locked"		\
			       " on uninitialized spinlock %p.\n",	\
			       __FILE__, __LINE__, (x));		\
		}							\
	} while (0)
#define spin_is_locked(x)						\
	({								\
		CHECK_LOCK(x);						\
		volatile unsigned int *a = __ldcw_align(x);		\
		if (unlikely((*a == 0) && (x)->babble)) {		\
			(x)->babble--;					\
			printk(KERN_WARNING				\
			       "%s:%d: spin_is_locked(%s/%p) already"	\
			       " locked by %s:%d in %s at %p(%d)\n",	\
			       __FILE__, __LINE__, (x)->module, (x),	\
			       (x)->bfile, (x)->bline, (x)->task->comm,	\
			       (x)->previous, (x)->oncpu);		\
		}							\
		*a == 0;						\
	})
#define spin_unlock_wait(x)						\
	do {								\
		CHECK_LOCK(x);						\
		volatile unsigned int *a = __ldcw_align(x);		\
		if (unlikely((*a == 0) && (x)->babble)) {		\
			(x)->babble--;					\
			printk(KERN_WARNING				\
			       "%s:%d: spin_unlock_wait(%s/%p)"		\
			       " owned by %s:%d in %s at %p(%d)\n",	\
			       __FILE__, __LINE__, (x)->module, (x),	\
			       (x)->bfile, (x)->bline, (x)->task->comm,	\
			       (x)->previous, (x)->oncpu);		\
		}							\
		barrier();						\
	} while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
extern int _dbg_spin_trylock(spinlock_t *lock, const char *, int);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#define _raw_spin_unlock(lock)	_dbg_spin_unlock(lock, __FILE__, __LINE__)
#define _raw_spin_lock(lock)	_dbg_spin_lock(lock, __FILE__, __LINE__)
#define _raw_spin_trylock(lock)	_dbg_spin_trylock(lock, __FILE__, __LINE__)

/* just in case we need it */
#define spin_lock_own(LOCK, LOCATION)					\
	do {								\
		volatile unsigned int *a = __ldcw_align(LOCK);		\
		if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \
			printk(KERN_WARNING				\
			       "%s: called on %d from %p but lock %s on %d\n", \
			       LOCATION, smp_processor_id(),		\
			       __builtin_return_address(0),		\
			       (*a == 0) ? "taken" : "freed", (LOCK)->oncpu); \
	} while (0)
#endif /* !(CONFIG_DEBUG_SPINLOCK) */
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
typedef struct {
	spinlock_t lock;
	volatile int counter;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
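/* The counter convention, as used by the functions below:
 *
 *	rw->counter ==  0	unlocked
 *	rw->counter  >  0	held by that many readers
 *	rw->counter == -1	held by one writer
 */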
#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }

#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
/* read_lock, read_unlock are pretty straightforward.  Of course it somewhat
 * sucks that we end up saving/restoring flags twice for read_lock_irqsave
 * and so on. */
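/* Illustrative expansion (assuming the generic irqsave wrappers):
 *
 *	read_lock_irqsave(lock, flags);
 *		=> local_irq_save(flags);	first save, in the wrapper
 *		   _raw_read_lock(lock);	does a second save/restore
 *						pair internally, see below
 */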
#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_read_lock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	_raw_spin_lock(&rw->lock);

	rw->counter++;

	_raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
#endif	/* CONFIG_DEBUG_RWLOCK */
static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	_raw_spin_lock(&rw->lock);

	rw->counter--;

	_raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so we release the lock and wait till
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers, someone fucked up and we'd dead-lock
 * sooner or later anyway.   prumpf */
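/* Sketch of that dead-lock (illustrative): a writer keeps rw->lock held
 * for its whole critical section, so a reader in an interrupt handler on
 * the same CPU can never succeed:
 *
 *	CPU0: _raw_write_lock(&rw);	takes and keeps rw->lock
 *	CPU0: <interrupt>
 *	CPU0:   _raw_read_lock(&rw);	spins on rw->lock forever
 */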
#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
	_raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		_raw_spin_unlock(&rw->lock);

		while (rw->counter != 0);

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}
#endif	/* CONFIG_DEBUG_RWLOCK */

/* write_unlock is absolutely trivial - we don't have to wait for anything */
static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	rw->counter = 0;
	_raw_spin_unlock(&rw->lock);
}
#ifdef CONFIG_DEBUG_RWLOCK
extern int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
#else
static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
	_raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		_raw_spin_unlock(&rw->lock);

		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}
#endif	/* CONFIG_DEBUG_RWLOCK */
static __inline__ int is_read_locked(rwlock_t *rw)
{
	return rw->counter > 0;
}
static __inline__ int is_write_locked(rwlock_t *rw)
{
	return rw->counter < 0;
}
#endif /* __ASM_SPINLOCK_H */