1 #include <linux/export.h>
2 #include <linux/lockref.h>
#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Lockless fast path: speculatively apply CODE to a copy of the
 * lockref while the spinlock half is observed unlocked, then try to
 * commit the whole 64-bit <lock,count> word with a single cmpxchg.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case, so the loop retries without re-reading explicitly.
 * We drop out of the loop (and fall back to taking the spinlock in
 * the caller) as soon as the lock is seen held.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax_lowlatency();						\
	}									\
} while (0)

#else

/* No usable 64-bit cmpxchg: callers always fall through to the spinlock. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
42 * lockref_get - Increments reference count unconditionally
43 * @lockref: pointer to lockref structure
45 * This operation is only valid if you already hold a reference
46 * to the object, so you know the count cannot be zero.
48 void lockref_get(struct lockref
*lockref
)
56 spin_lock(&lockref
->lock
);
58 spin_unlock(&lockref
->lock
);
60 EXPORT_SYMBOL(lockref_get
);
63 * lockref_get_not_zero - Increments count unless the count is 0 or dead
64 * @lockref: pointer to lockref structure
65 * Return: 1 if count updated successfully or 0 if count was zero
67 int lockref_get_not_zero(struct lockref
*lockref
)
79 spin_lock(&lockref
->lock
);
81 if (lockref
->count
> 0) {
85 spin_unlock(&lockref
->lock
);
88 EXPORT_SYMBOL(lockref_get_not_zero
);
91 * lockref_get_or_lock - Increments count unless the count is 0 or dead
92 * @lockref: pointer to lockref structure
93 * Return: 1 if count updated successfully or 0 if count was zero
94 * and we got the lock instead.
96 int lockref_get_or_lock(struct lockref
*lockref
)
106 spin_lock(&lockref
->lock
);
107 if (lockref
->count
<= 0)
110 spin_unlock(&lockref
->lock
);
113 EXPORT_SYMBOL(lockref_get_or_lock
);
116 * lockref_put_return - Decrement reference count if possible
117 * @lockref: pointer to lockref structure
119 * Decrement the reference count and return the new value.
120 * If the lockref was dead or locked, return an error.
122 int lockref_put_return(struct lockref
*lockref
)
133 EXPORT_SYMBOL(lockref_put_return
);
136 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
137 * @lockref: pointer to lockref structure
138 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
140 int lockref_put_or_lock(struct lockref
*lockref
)
150 spin_lock(&lockref
->lock
);
151 if (lockref
->count
<= 1)
154 spin_unlock(&lockref
->lock
);
157 EXPORT_SYMBOL(lockref_put_or_lock
);
160 * lockref_mark_dead - mark lockref dead
161 * @lockref: pointer to lockref structure
163 void lockref_mark_dead(struct lockref
*lockref
)
165 assert_spin_locked(&lockref
->lock
);
166 lockref
->count
= -128;
168 EXPORT_SYMBOL(lockref_mark_dead
);
171 * lockref_get_not_dead - Increments count unless the ref is dead
172 * @lockref: pointer to lockref structure
173 * Return: 1 if count updated successfully or 0 if lockref was dead
175 int lockref_get_not_dead(struct lockref
*lockref
)
187 spin_lock(&lockref
->lock
);
189 if (lockref
->count
>= 0) {
193 spin_unlock(&lockref
->lock
);
196 EXPORT_SYMBOL(lockref_get_not_dead
);