/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer acquires the lock by adding RWSEM_ACTIVE_WRITE_BIAS, the
 * count becomes 0xffffffff00000001 in the uncontended case. Readers
 * increment the count by 1 and see a positive value when uncontended, or
 * a negative value if there are writers (and possibly readers) waiting,
 * in which case the reader goes to sleep.
 */
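
/*
 * Example count values implied by the encoding above (shown as raw
 * 64-bit values; upper word = MSW, lower word = LSW):
 *
 *	0x0000000000000000	unlocked, no waiters
 *	0x0000000000000003	three active readers, no waiters
 *	0xffffffff00000001	one active writer, no waiters
 *	0xfffffffe00000001	one active writer, one waiting locker
 */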

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <asm/intrinsics.h>

#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS		(1L)
#define RWSEM_ACTIVE_MASK		(0xffffffffL)
#define RWSEM_WAITING_BIAS		(-0x100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
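
/*
 * RWSEM_ACTIVE_WRITE_BIAS is -0x100000000 + 1, i.e. 0xffffffff00000001
 * as an unsigned 64-bit value: MSW -1 (one active/waiting writer),
 * LSW 1 (one active lock).
 *
 * The _acq/_rel suffixes on the intrinsics below select ia64 acquire
 * semantics on the lock paths and release semantics on the unlock paths.
 */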

/*
 * lock for reading
 */
static inline void
__down_read (struct rw_semaphore *sem)
{
	/* fetchadd returns the count before the increment */
	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

	if (result < 0)
		rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 */
static inline void
__down_write (struct rw_semaphore *sem)
{
	long old, new;

	/* ia64 fetchadd only takes small immediate values, hence a cmpxchg loop */
	do {
		old = sem->count;
		new = old + RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_acq(&sem->count, old, new) != old);

	/* a non-zero old count means the lock was already held or contended */
	if (old != 0)
		rwsem_down_write_failed(sem);
}

/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

	/* --result is the post-decrement count: wake when waiters exist and no active lockers remain */
	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void
__up_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old - RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	/* wake waiters if the count is still negative with no active lockers left */
	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
	long tmp;
	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp + 1)) {
			return 1;
		}
	}
	return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
			       RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		/* removing WAITING_BIAS turns an active write lock into one active read lock */
		new = old - RWSEM_WAITING_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	if (old < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
 */
#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
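
/*
 * These primitives are not called directly: <linux/rwsem.h> provides the
 * generic down_read()/up_read()/down_write()/up_write() entry points that
 * end up here. A sketch of typical usage through those wrappers (my_sem
 * is a hypothetical example, not part of this header):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);	-- eventually reaches __down_read()
 *	... read-side critical section ...
 *	up_read(&my_sem);	-- eventually reaches __up_read()
 */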

#endif /* _ASM_IA64_RWSEM_H */