zen-stable.git: arch/xtensa/include/asm/rwsem.h
/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
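
/*
 * Illustrative counter states implied by the biases above (annotation,
 * not part of the original source).  The low 16 bits count active
 * holders; the upper bits go negative while tasks are queued:
 *
 *	0x00000000	unlocked, nobody waiting
 *	0x00000003	three active readers, nobody waiting
 *	0xffff0001	one active writer (ACTIVE_BIAS + WAITING_BIAS), or
 *			one active reader with a waiter queued
 *	0xffff0000	no active holders, waiters queued: time to wake
 */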
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	/* Fast path: a positive result means no writer is active. */
	if (atomic_add_return(1, (atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	/* Retry only while no writer bias has driven the count negative. */
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}
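
/*
 * Illustrative (annotation, not in the original): with two active
 * readers the count is 0x00000002; a successful cmpxchg advances it to
 * 0x00000003 and the trylock returns 1.  With a writer holding the lock
 * the count is negative (0xffff0001), the while condition fails at
 * once, and the trylock returns 0 without ever sleeping.
 */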
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	/* Fast path: the semaphore was completely unlocked. */
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}
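
/*
 * Worked example (annotation, not in the original): on an unlocked
 * semaphore, 0x00000000 + RWSEM_ACTIVE_WRITE_BIAS leaves the count at
 * exactly RWSEM_ACTIVE_WRITE_BIAS, so the fast path wins.  Any other
 * result means readers, a writer, or waiters got there first, and
 * rwsem_down_write_failed() must queue us.
 */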
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_sub_return(1, (atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
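
/*
 * Worked example (annotation, not in the original): with one active
 * reader and one queued waiter the count is 0xffff0001.  Dropping the
 * read lock leaves 0xffff0000: negative (waiters present) with a clear
 * active mask, so rwsem_wake() hands the semaphore to the queued task.
 */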
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}
/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
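
/*
 * Worked example (annotation, not in the original): a writer with no
 * waiters holds the count at RWSEM_ACTIVE_WRITE_BIAS (-0x0000ffff);
 * adding -RWSEM_WAITING_BIAS (+0x00010000) leaves 0x00000001, one
 * active reader.  A negative result means waiters are still queued,
 * so rwsem_downgrade_wake() can admit any readers parked behind us.
 */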
/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif	/* _XTENSA_RWSEM_H */
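
/*
 * Usage sketch (annotation, not part of this header): callers never
 * invoke the double-underscore primitives directly; they use the
 * generic wrappers that <linux/rwsem.h> layers on top of them.  The
 * semaphore and function names below are hypothetical.
 */
#if 0	/* example only */
static DECLARE_RWSEM(example_sem);

static void reader_path(void)
{
	down_read(&example_sem);	/* fast path: __down_read() */
	/* ... read shared state ... */
	up_read(&example_sem);		/* __up_read(); may call rwsem_wake() */
}

static void writer_path(void)
{
	down_write(&example_sem);	/* __down_write() */
	/* ... modify shared state ... */
	downgrade_write(&example_sem);	/* keep a read hold, let readers in */
	/* ... keep reading ... */
	up_read(&example_sem);
}
#endif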