#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * A percpu refcount starts out as just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow.)
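 *
 * For example (illustrative numbers, not taken from any real workload): if
 * CPU0 does three gets (its counter reads 3) and CPU1 does two puts (its
 * counter wraps to 0xFFFFFFFE), the counters sum to 1 modulo 2^32 - exactly
 * the one reference still outstanding, just as if a single integer had been
 * used.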
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non percpu mode before the initial ref is dropped, everything
 * works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */
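
/*
 * A minimal usage sketch (illustrative only; the struct and function names
 * below, my_obj and my_obj_release(), are hypothetical and not part of this
 * file).  An object embeds a percpu_ref, takes and drops references from hot
 * paths via percpu_ref_get()/percpu_ref_put(), and drops the initial ref
 * exactly once at teardown via percpu_ref_kill():
 *
 *	struct my_obj {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void my_obj_release(struct percpu_ref *ref)
 *	{
 *		kfree(container_of(ref, struct my_obj, ref));
 *	}
 *
 *	ret = percpu_ref_init(&obj->ref, my_obj_release);
 *
 *	percpu_ref_get(&obj->ref);
 *	percpu_ref_put(&obj->ref);
 *
 *	percpu_ref_kill(&obj->ref);
 */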

#define PCPU_COUNT_BIAS		(1U << 31)

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count = alloc_percpu(unsigned);
	if (!ref->pcpu_count)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
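
/*
 * Because @release may be invoked from RCU callback (i.e. atomic) context, a
 * release function that needs to sleep should defer that work elsewhere, for
 * example to a workqueue.  A sketch with hypothetical names (my_obj and its
 * free_work member are illustrative only):
 *
 *	static void my_deferred_release(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		schedule_work(&obj->free_work);
 *	}
 */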

/**
 * percpu_ref_cancel_init - cancel percpu_ref_init()
 * @ref: percpu_ref to cancel init for
 *
 * Once a percpu_ref is initialized, its destruction is initiated by
 * percpu_ref_kill() and completes asynchronously, which can be painful to
 * do when destroying a half-constructed object in an init failure path.
 *
 * This function destroys @ref without invoking @ref->release and the
 * memory area containing it can be freed immediately on return.  To
 * prevent accidental misuse, it's required that @ref has finished
 * percpu_ref_init(), whether successful or not, but never been used.
 *
 * The weird name and usage restriction are to prevent people from using
 * this function by mistake for normal shutdown instead of
 * percpu_ref_kill().
 */
void percpu_ref_cancel_init(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = ref->pcpu_count;
	int cpu;

	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);

	/* pcpu_count is NULL if percpu_ref_init() failed to allocate it */
	if (pcpu_count) {
		for_each_possible_cpu(cpu)
			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
		free_percpu(ref->pcpu_count);
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
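
/*
 * Sketch of the intended init-failure pattern (hypothetical names; my_obj_init
 * and setup_something_else() are illustrative only, not real functions):
 *
 *	static int my_obj_init(struct my_obj *obj)
 *	{
 *		int ret;
 *
 *		ret = percpu_ref_init(&obj->ref, my_obj_release);
 *		if (ret)
 *			return ret;
 *
 *		ret = setup_something_else(obj);
 *		if (ret) {
 *			percpu_ref_cancel_init(&obj->ref);
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 * The ref has been initialized but never used, so it can be torn down
 * synchronously without going through percpu_ref_kill().
 */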

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = ref->pcpu_count;
	unsigned count = 0;
	int cpu;

	/* Mask out PCPU_REF_DEAD */
	pcpu_count = (unsigned __percpu *)
		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	free_percpu(pcpu_count);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

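	/*
	 * Illustrative arithmetic (made-up numbers): with PCPU_COUNT_BIAS =
	 * 2^31 the atomic count starts at 2^31 + 1, so puts that land on the
	 * atomic counter between percpu_ref_kill() and this point can't drag
	 * it down to 0.  If the percpu counters sum to 4 (four gets still
	 * outstanding) and no atomic-mode gets or puts have happened, the
	 * atomic_add() above adds 4 - 2^31, leaving &ref->count at 5: the
	 * initial ref plus the four outstanding gets.
	 */
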
	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
		  atomic_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail.  See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count = (unsigned __percpu *)
		(((unsigned long) ref->pcpu_count) | PCPU_REF_DEAD);
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
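
/*
 * Sketch of how a caller might use the confirmation callback (hypothetical
 * names; my_obj, my_obj_confirm_kill() and the kill_done completion are
 * illustrative only).  Since @confirm_kill may not block, a common shape is
 * to have it signal a completion that the teardown path then waits on:
 *
 *	static void my_obj_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *		complete(&obj->kill_done);
 *	}
 *
 *	init_completion(&obj->kill_done);
 *	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
 *	wait_for_completion(&obj->kill_done);
 *
 * After wait_for_completion() returns, percpu_ref_tryget() on @ref is
 * guaranteed to fail on all CPUs.
 */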