include/linux/rcuref.h  (Linux v2.6.15-rc6)
/*
 * rcuref.h
 *
 * Reference counting for elements of lists/arrays protected by
 * RCU.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *         Ravikiran Thirumalai <kiran_th@gmail.com>
 *
 * See Documentation/RCU/rcuref.txt for detailed user guide.
 */

#ifndef _RCUREF_H_
#define _RCUREF_H_

#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
/*
 * These APIs work on traditional atomic_t counters used in the
 * kernel for reference counting. Under special circumstances
 * where a lock-free get() operation races with a put() operation
 * these APIs can be used. See Documentation/RCU/rcuref.txt.
 */
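/*
 * Usage sketch (illustrative only, not part of this header): the
 * counter these APIs operate on is an ordinary atomic_t embedded in
 * the RCU-protected element.  The struct and field names below are
 * hypothetical:
 *
 *	struct foo {
 *		struct list_head list;
 *		atomic_t refcount;
 *		struct rcu_head rcu;
 *		int key;
 *	};
 *
 * "list" hooks the element into an RCU-protected list, "refcount" is
 * manipulated only through the rcuref_*() APIs, and "rcu" is used to
 * defer the final free past a grace period.  Writers initialize the
 * counter to 1 (the list's own reference) with atomic_set() before
 * publishing the element with list_add_rcu().
 */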
#ifdef __HAVE_ARCH_CMPXCHG

/**
 * rcuref_inc - increment refcount for object.
 * @rcuref: reference counter in the object in question.
 *
 * This should be used only for objects where we use RCU and
 * use the rcuref_inc_lf() api to acquire a reference
 * in a lock-free reader-side critical section.
 */
static inline void rcuref_inc(atomic_t *rcuref)
{
	atomic_inc(rcuref);
}

/**
 * rcuref_dec - decrement refcount for object.
 * @rcuref: reference counter in the object in question.
 *
 * This should be used only for objects where we use RCU and
 * use the rcuref_inc_lf() api to acquire a reference
 * in a lock-free reader-side critical section.
 */
static inline void rcuref_dec(atomic_t *rcuref)
{
	atomic_dec(rcuref);
}
/**
 * rcuref_dec_and_test - decrement refcount for object and test
 * @rcuref: reference counter in the object.
 *
 * Decrement the refcount, and if it reaches 0, return 1.  Else return 0.
 *
 * This should be used only for objects where we use RCU and
 * use the rcuref_inc_lf() api to acquire a reference
 * in a lock-free reader-side critical section.
 */
static inline int rcuref_dec_and_test(atomic_t *rcuref)
{
	return atomic_dec_and_test(rcuref);
}
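/*
 * Release-side sketch, modelled on Documentation/RCU/rcuref.txt.  The
 * names ("foo", foo_list_lock, foo_free_rcu) are hypothetical.  The
 * writer unlinks the element first, then drops the list's reference;
 * whoever drops the last reference defers the free past a grace period
 * so concurrent lock-free readers never touch freed memory:
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	void foo_delete(struct foo *fp)
 *	{
 *		spin_lock(&foo_list_lock);
 *		list_del_rcu(&fp->list);
 *		spin_unlock(&foo_list_lock);
 *		if (rcuref_dec_and_test(&fp->refcount))
 *			call_rcu(&fp->rcu, foo_free_rcu);
 *	}
 *
 * A plain foo_put() from a reader that took its reference with
 * rcuref_inc_lf() looks the same minus the list manipulation.
 */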
/*
 * cmpxchg is needed on UP too, if deletions to the list/array can happen
 * in interrupt context.
 */

/**
 * rcuref_inc_lf - Take reference to an object in a read-side
 * critical section protected by RCU.
 * @rcuref: reference counter in the object in question.
 *
 * Try and increment the refcount by 1.  The increment might fail if
 * the reference counter has been through a 1 to 0 transition and
 * is no longer part of the lock-free list.
 * Returns non-zero on successful increment and zero otherwise.
 */
static inline int rcuref_inc_lf(atomic_t *rcuref)
{
	int c, old;
	c = atomic_read(rcuref);
	while (c && (old = cmpxchg(&rcuref->counter, c, c + 1)) != c)
		c = old;
	return c;
}
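/*
 * Reader-side sketch (the hypothetical "foo" element, foo_list and key
 * from the sketch above).  The lookup runs without locks under
 * rcu_read_lock(); a zero return from rcuref_inc_lf() means the counter
 * already went through its 1 to 0 transition, i.e. the element is being
 * torn down, so the reader must treat it as not found:
 *
 *	struct foo *foo_get(int key)
 *	{
 *		struct foo *fp;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(fp, &foo_list, list) {
 *			if (fp->key == key && rcuref_inc_lf(&fp->refcount)) {
 *				rcu_read_unlock();
 *				return fp;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return NULL;
 *	}
 */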
#else				/* !__HAVE_ARCH_CMPXCHG */

extern spinlock_t __rcuref_hash[];

/*
 * Use a hash table of locks to protect the reference count
 * since cmpxchg is not available in this arch.
 */
#ifdef CONFIG_SMP
#define RCUREF_HASH_SIZE	4
#define RCUREF_HASH(k) \
		(&__rcuref_hash[(((unsigned long)k)>>8) & (RCUREF_HASH_SIZE-1)])
#else
#define RCUREF_HASH_SIZE	1
#define RCUREF_HASH(k)	&__rcuref_hash[0]
#endif				/* CONFIG_SMP */
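/*
 * Illustration (not part of this header): RCUREF_HASH() picks one of
 * RCUREF_HASH_SIZE spinlocks based on the counter's address, so
 * unrelated counters may share a lock.  On SMP, a counter at address
 * 0xdead1234 maps to ((0xdead1234 >> 8) & 3) == 2, i.e. &__rcuref_hash[2].
 * The array itself must be defined exactly once outside this header by
 * the core RCU code; a plausible definition would be:
 *
 *	spinlock_t __rcuref_hash[RCUREF_HASH_SIZE] = {
 *		[0 ... (RCUREF_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
 *	};
 */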
/**
 * rcuref_inc - increment refcount for object.
 * @rcuref: reference counter in the object in question.
 *
 * This should be used only for objects where we use RCU and
 * use the rcuref_inc_lf() api to acquire a reference in a lock-free
 * reader-side critical section.
 */
static inline void rcuref_inc(atomic_t *rcuref)
{
	unsigned long flags;
	spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
	rcuref->counter += 1;
	spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
}

/**
 * rcuref_dec - decrement refcount for object.
 * @rcuref: reference counter in the object in question.
 *
 * This should be used only for objects where we use RCU and
 * use the rcuref_inc_lf() api to acquire a reference in a lock-free
 * reader-side critical section.
 */
static inline void rcuref_dec(atomic_t *rcuref)
{
	unsigned long flags;
	spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
	rcuref->counter -= 1;
	spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
}
/**
 * rcuref_dec_and_test - decrement refcount for object and test
 * @rcuref: reference counter in the object.
 *
 * Decrement the refcount, and if it reaches 0, return 1.  Else return 0.
 *
 * This should be used only for objects where we use RCU and
 * use the rcuref_inc_lf() api to acquire a reference in a lock-free
 * reader-side critical section.
 */
static inline int rcuref_dec_and_test(atomic_t *rcuref)
{
	unsigned long flags;
	spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
	rcuref->counter--;
	if (!rcuref->counter) {
		spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
		return 1;
	} else {
		spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
		return 0;
	}
}
/**
 * rcuref_inc_lf - Take reference to an object of a lock-free collection
 * by traversing a lock-free list/array.
 * @rcuref: reference counter in the object in question.
 *
 * Try and increment the refcount by 1.  The increment might fail if
 * the reference counter has been through a 1 to 0 transition and
 * the object is no longer part of the lock-free list.
 * Returns non-zero on successful increment and zero otherwise.
 */
static inline int rcuref_inc_lf(atomic_t *rcuref)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(RCUREF_HASH(rcuref), flags);
	if (rcuref->counter)
		ret = rcuref->counter++;
	else
		ret = 0;
	spin_unlock_irqrestore(RCUREF_HASH(rcuref), flags);
	return ret;
}

#endif				/* !__HAVE_ARCH_CMPXCHG */

#endif				/* __KERNEL__ */
#endif				/* _RCUREF_H_ */