arch/sh/include/asm/rwsem.h [zen-stable.git]

/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */

#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

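/*
 * Overview of the count encoding, derived from the bias values above:
 * the low 16 bits (RWSEM_ACTIVE_MASK) count active lockers, and a
 * negative upper half marks a writer and/or queued waiters.  Example
 * values, viewed as unsigned 32-bit:
 *
 *      0x00000000      unlocked                (RWSEM_UNLOCKED_VALUE)
 *      0x00000001      one active reader
 *      0x00000003      three active readers
 *      0xffff0001      one active writer       (RWSEM_ACTIVE_WRITE_BIAS,
 *                                               i.e. -0x00010000 + 1)
 *      0xffff0000      waiters queued, no      (RWSEM_WAITING_BIAS)
 *                      active lockers
 *
 * Each reader adds RWSEM_ACTIVE_READ_BIAS, a writer adds
 * RWSEM_ACTIVE_WRITE_BIAS, and the slow paths in lib/rwsem.c account
 * for sleeping tasks with RWSEM_WAITING_BIAS.
 */
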
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
                smp_wmb();
        else
                rwsem_down_read_failed(sem);
}

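/*
 * Note on __down_read() above: atomic_inc_return() is positive only
 * when no writer is active and no tasks are queued (either would
 * leave the count negative), so the uncontended fast path is a single
 * atomic increment plus a barrier; otherwise the task falls into the
 * slow path in lib/rwsem.c and sleeps.
 */
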
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        smp_wmb();
                        return 1;
                }
        }
        return 0;
}

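/*
 * The trylock above is a standard compare-and-swap loop: re-read the
 * count while it is non-negative (no writer, no waiters) and try to
 * swap in count + RWSEM_ACTIVE_READ_BIAS.  cmpxchg() returning the
 * old value means the swap happened and the read lock is held; a
 * negative count aborts without sleeping.
 */
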
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
                smp_wmb();
        else
                rwsem_down_write_failed(sem);
}

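/*
 * Note on __down_write() above: atomic_add_return() yields exactly
 * RWSEM_ACTIVE_WRITE_BIAS only if the count was previously
 * RWSEM_UNLOCKED_VALUE (0), i.e. the lock was completely free; any
 * other result means readers, a writer, or waiters were present and
 * the task must take the slow path.
 */
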
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        smp_wmb();
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_dec_return((atomic_t *)(&sem->count));
        if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

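/*
 * Note on __up_read() above: after the decrement, a count below -1
 * with no bits set in RWSEM_ACTIVE_MASK means waiters are queued
 * (RWSEM_WAITING_BIAS is in effect) and this task just dropped the
 * last active lock, so a waiter must be woken.
 */
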
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        smp_wmb();
        if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0)
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

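/*
 * Note on __downgrade_write() above: adding -RWSEM_WAITING_BIAS
 * (+0x00010000) converts the writer's contribution of 0xffff0001 into
 * 0x00000001, i.e. one active reader.  A result that is still
 * negative means other tasks are queued, so waiting readers can now
 * be woken to run alongside the downgraded owner.
 */
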
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        __down_write(sem);
}

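/*
 * __down_write_nested() can ignore the subclass argument here: the
 * lockdep annotation that consumes it lives in the generic
 * down_write_nested() wrapper, so at this level a nested write lock
 * is just a plain write lock.
 */
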
/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        smp_mb();
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */