/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */

#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

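/*
 * The count field packs two things into one signed 64-bit word: the low
 * 32 bits (RWSEM_ACTIVE_MASK) count active lockers, while the negative
 * RWSEM_WAITING_BIAS in the upper bits is applied when a writer holds
 * the lock or tasks are queued on wait_list.  The fast paths below then
 * need only a single atomic operation on count to detect contention.
 */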
struct rw_semaphore {
        signed long             count;
#define RWSEM_UNLOCKED_VALUE            0x00000000L
#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_ACTIVE_MASK               0xffffffffL
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

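/*
 * Out-of-line slow paths, entered only when a fast path below detects
 * contention; they are provided by the generic rwsem code (lib/rwsem.c).
 */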
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                         \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __init_rwsem((sem), #sem, &__key);                      \
} while (0)

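/*
 * lock for reading
 *
 * The fast path just increments count: a positive result means only
 * readers hold the lock, while a result <= 0 means a writer owns the
 * lock or tasks are queued (the negative RWSEM_WAITING_BIAS dominates),
 * so the reader must fall back to the sleeping slow path.
 */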
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
                rwsem_down_read_failed(sem);
}

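/*
 * Trylock for reading: retry a cmpxchg that bumps the reader count for
 * as long as the count stays non-negative; give up as soon as a writer
 * or a queued waiter has driven it negative.
 */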
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long tmp;

        while ((tmp = sem->count) >= 0L) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

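/*
 * lock for writing
 *
 * A writer adds RWSEM_ACTIVE_WRITE_BIAS, i.e. one active reference plus
 * the waiting bias.  Only when the result is exactly that value was the
 * semaphore previously idle; any other value means readers, a writer or
 * queued waiters are present and the slow path must be taken.
 */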
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        long tmp;

        tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                  (atomic64_t *)(&sem->count));
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

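/*
 * Trylock for writing: a single cmpxchg from RWSEM_UNLOCKED_VALUE to
 * RWSEM_ACTIVE_WRITE_BIAS; it succeeds only if nobody held the lock and
 * nobody was queued at the moment of the exchange.
 */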
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
        if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                                         (atomic64_t *)(&sem->count)) < 0L))
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
        atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
        if (tmp < 0L)
                rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
        return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

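/*
 * A non-zero count means the semaphore has active holders and/or queued
 * waiters, so treat it as locked.
 */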
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */