arch/sparc/include/asm/rwsem.h

/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
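
/*
 * Explanatory note (added here, not part of the original header): sem->count
 * is a signed 64-bit value.  The low 32 bits (RWSEM_ACTIVE_MASK) count active
 * lock holders; the upper bits carry the waiting bias, which makes the whole
 * value negative whenever a writer is involved or tasks are queued.
 * A few example values, derived from the constants above:
 *
 *	0x0000000000000000	unlocked (RWSEM_UNLOCKED_VALUE)
 *	0x0000000000000003	three active readers, nobody waiting
 *	0xffffffff00000001	one active writer (RWSEM_ACTIVE_WRITE_BIAS ==
 *				RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 */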

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}
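
/*
 * Worked example for the write paths (added comment): on an unlocked
 * semaphore, __down_write_trylock() swaps RWSEM_UNLOCKED_VALUE (0) for
 * RWSEM_ACTIVE_WRITE_BIAS, and __down_write_nested() sees exactly
 * RWSEM_ACTIVE_WRITE_BIAS come back from atomic64_add_return(), so the
 * fast path succeeds.  Any other return value means readers, another
 * writer, or waiters were already accounted in the count, and the slow
 * path rwsem_down_write_failed() is taken.
 */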

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}
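
/*
 * Added note on the wake test above: after the decrement, tmp < -1L can
 * only hold if the waiting bias is present in the count (tasks are queued),
 * and (tmp & RWSEM_ACTIVE_MASK) == 0 means no active holders remain.  For
 * example, one reader plus queued waiters is RWSEM_WAITING_BIAS + 1; the
 * decrement yields RWSEM_WAITING_BIAS, both tests pass, and rwsem_wake()
 * hands the lock on to the waiters.
 */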

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}
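
/*
 * Added note: subtracting RWSEM_WAITING_BIAS turns an exclusively held
 * count (RWSEM_ACTIVE_WRITE_BIAS) into a single-reader count
 * (RWSEM_ACTIVE_READ_BIAS), since WRITE_BIAS - WAITING_BIAS == ACTIVE_BIAS.
 * If the result is still negative, waiters remain queued behind the
 * (former) writer, so rwsem_downgrade_wake() is called to let them in.
 */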

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}
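
/*
 * Usage sketch (illustrative, not part of the original header): the
 * __-prefixed primitives above are only called through the generic
 * wrappers in linux/rwsem.h; kernel code takes the lock like this,
 * where "example_sem" is a hypothetical lock name:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	// shared: count += RWSEM_ACTIVE_READ_BIAS
 *	... read shared data ...
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);	// exclusive: count += RWSEM_ACTIVE_WRITE_BIAS
 *	... modify shared data ...
 *	up_write(&example_sem);
 */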

#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */