1 #include <linux/export.h>
2 #include <linux/lockref.h>
3 #include <linux/mutex.h>
#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * The loop runs CODE speculatively on a private copy while the
 * embedded spinlock is observed unlocked, then publishes the new
 * count (and still-unlocked lock word) in one 64-bit cmpxchg.
 * SUCCESS runs only when the cmpxchg committed; otherwise we
 * relax and retry, or fall through to the locked slow path as
 * soon as somebody is seen holding the spinlock.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

/* No usable 64-bit cmpxchg: compile the fast path away entirely so
 * every caller falls straight through to its spinlocked slow path. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
43 * lockref_get - Increments reference count unconditionally
44 * @lockref: pointer to lockref structure
46 * This operation is only valid if you already hold a reference
47 * to the object, so you know the count cannot be zero.
49 void lockref_get(struct lockref
*lockref
)
57 spin_lock(&lockref
->lock
);
59 spin_unlock(&lockref
->lock
);
61 EXPORT_SYMBOL(lockref_get
);
64 * lockref_get_not_zero - Increments count unless the count is 0
65 * @lockref: pointer to lockref structure
66 * Return: 1 if count updated successfully or 0 if count was zero
68 int lockref_get_not_zero(struct lockref
*lockref
)
80 spin_lock(&lockref
->lock
);
86 spin_unlock(&lockref
->lock
);
89 EXPORT_SYMBOL(lockref_get_not_zero
);
92 * lockref_get_or_lock - Increments count unless the count is 0
93 * @lockref: pointer to lockref structure
94 * Return: 1 if count updated successfully or 0 if count was zero
95 * and we got the lock instead.
97 int lockref_get_or_lock(struct lockref
*lockref
)
107 spin_lock(&lockref
->lock
);
111 spin_unlock(&lockref
->lock
);
114 EXPORT_SYMBOL(lockref_get_or_lock
);
117 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
118 * @lockref: pointer to lockref structure
119 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
121 int lockref_put_or_lock(struct lockref
*lockref
)
131 spin_lock(&lockref
->lock
);
132 if (lockref
->count
<= 1)
135 spin_unlock(&lockref
->lock
);
138 EXPORT_SYMBOL(lockref_put_or_lock
);
141 * lockref_mark_dead - mark lockref dead
142 * @lockref: pointer to lockref structure
144 void lockref_mark_dead(struct lockref
*lockref
)
146 assert_spin_locked(&lockref
->lock
);
147 lockref
->count
= -128;
149 EXPORT_SYMBOL(lockref_mark_dead
);
152 * lockref_get_not_dead - Increments count unless the ref is dead
153 * @lockref: pointer to lockref structure
154 * Return: 1 if count updated successfully or 0 if lockref was dead
156 int lockref_get_not_dead(struct lockref
*lockref
)
162 if ((int)old
.count
< 0)
168 spin_lock(&lockref
->lock
);
170 if ((int) lockref
->count
>= 0) {
174 spin_unlock(&lockref
->lock
);
177 EXPORT_SYMBOL(lockref_get_not_dead
);