/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */

#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};

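/*
 * A rough reading of the constants above: the low 16 bits
 * (RWSEM_ACTIVE_MASK) count active lockers, while the upper bits go
 * negative in steps of RWSEM_WAITING_BIAS while tasks are queued on
 * wait_list.  A negative count whose active part is zero therefore
 * means that only waiters remain and a wakeup is due.
 */
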
#define __RWSEM_INITIALIZER(name)			\
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED,	\
	  LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

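/*
 * Illustrative usage sketch (the semaphore name below is hypothetical):
 * callers normally use the generic wrappers from <linux/rwsem.h>, which
 * end up in the __down_ and __up_ helpers defined later in this file.
 *
 *	static DECLARE_RWSEM(example_rwsem);
 *
 *	down_read(&example_rwsem);	shared (reader) section
 *	up_read(&example_rwsem);
 *
 *	down_write(&example_rwsem);	exclusive (writer) section
 *	up_write(&example_rwsem);
 */
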
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

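/*
 * Reading of the wakeup test above: after the decrement a negative
 * count means sleepers are queued, and a zero RWSEM_ACTIVE_MASK part
 * means the last active locker has just left, so rwsem_wake() must
 * pass the semaphore on.
 */
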
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

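/*
 * Arithmetic behind the downgrade above: a writer holds the lock with
 * RWSEM_ACTIVE_WRITE_BIAS = RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS, so
 * adding -RWSEM_WAITING_BIAS leaves RWSEM_ACTIVE_BIAS, i.e. the count
 * of a single active reader.  A result that is still negative means
 * other tasks are queued, hence the rwsem_downgrade_wake() call.
 */
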
/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */