// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *        Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
        unsigned long gp_seq;           /* Grace-period counter. */
};
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .donetail       = &rcu_ctrlblk.rcucblist,
        .curtail        = &rcu_ctrlblk.rcucblist,
        .gp_seq         = 0 - 300UL,
};
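/*
 * Editorial note (not in the original source): ->gp_seq advances by
 * two per grace period (see rcu_qs() and synchronize_rcu() below), so
 * the cookies handed out by the polled grace-period API change in
 * steps of two.  The unusual 0 - 300UL initial value presumably makes
 * the counter wrap shortly after boot, so that wraparound bugs in
 * cookie handling surface early in testing.
 */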
void rcu_barrier(void)
{
        wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);
/* Record an rcu quiescent state. */
void rcu_qs(void)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
                rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
                raise_softirq_irqoff(RCU_SOFTIRQ);
        }
        WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
        local_irq_restore(flags);
}
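/*
 * Editorial note (not in the original source): Tiny RCU is built only
 * for uniprocessor (!SMP) kernels, so a single quiescent state on the
 * one and only CPU ends the current grace period.  That is why rcu_qs()
 * can advance the done-callback tail and the grace-period counter
 * directly, with disabled interrupts standing in for the locking that
 * Tree RCU needs.
 */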
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
        if (user) {
                rcu_qs();
        } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
                set_tsk_need_resched(current);
                set_preempt_need_resched();
        }
}
/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree).  Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
        rcu_callback_t f;
        unsigned long offset = (unsigned long)head->func;

        rcu_lock_acquire(&rcu_callback_map);
        if (__is_kvfree_rcu_offset(offset)) {
                trace_rcu_invoke_kvfree_callback("", head, offset);
                kvfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
                return true;
        }

        trace_rcu_invoke_callback("", head);
        f = head->func;
        debug_rcu_head_callback(head);
        WRITE_ONCE(head->func, (rcu_callback_t)0L);
        f(head);
        rcu_lock_release(&rcu_callback_map);
        return false;
}
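/*
 * Editorial note (not in the original source): the kvfree path above
 * relies on kvfree_rcu() storing not a function pointer but the byte
 * offset of the rcu_head within its enclosing object in ->func.
 * __is_kvfree_rcu_offset() recognizes such small values, and
 * subtracting the offset from the rcu_head's address recovers the
 * pointer to pass to kvfree().  Sketch with a hypothetical struct:
 *
 *        struct foo {
 *                int data;
 *                struct rcu_head rh;   // ->func == offsetof(struct foo, rh)
 *        };
 *        // (void *)&fp->rh - offsetof(struct foo, rh) == fp
 */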
/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(void)
{
        struct rcu_head *next, *list;
        unsigned long flags;

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
                /* No callbacks ready, so just leave. */
                local_irq_restore(flags);
                return;
        }
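        /*
         * Editorial note (not in the original source): everything from
         * the head of ->rcucblist through *->donetail has waited a full
         * grace period and may be invoked; callbacks after ->donetail
         * must keep waiting.  The lines below detach the done portion
         * onto the local list and reset the tail pointers.
         */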
        list = rcu_ctrlblk.rcucblist;
        rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
        *rcu_ctrlblk.donetail = NULL;
        if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
                rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
        rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                rcu_reclaim_tiny(list);
                list = next;
        }
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
        preempt_disable();
        WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
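/*
 * Editorial usage sketch (not in the original source; struct foo,
 * foo_lock, and remove_foo() are hypothetical names): a typical updater
 * unlinks an item, waits for a grace period, and only then frees it:
 *
 *        void remove_foo(struct foo *fp)
 *        {
 *                spin_lock(&foo_lock);
 *                list_del_rcu(&fp->list);
 *                spin_unlock(&foo_lock);
 *                synchronize_rcu();  // wait out all pre-existing readers
 *                kfree(fp);          // no reader can still hold a reference
 *        }
 */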
static void tiny_rcu_leak_callback(struct rcu_head *rhp)
{
}
/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
        static atomic_t doublefrees;
        unsigned long flags;

        if (debug_rcu_head_queue(head)) {
                if (atomic_inc_return(&doublefrees) < 4) {
                        pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func);
                        mem_dump_obj(head);
                }

                if (!__is_kvfree_rcu_offset((unsigned long)head->func))
                        WRITE_ONCE(head->func, tiny_rcu_leak_callback);
                return;
        }

        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcu_ctrlblk.curtail = head;
        rcu_ctrlblk.curtail = &head->next;
        local_irq_restore(flags);

        if (unlikely(is_idle_task(current))) {
                /* force scheduling for rcu_qs() */
                resched_cpu(0);
        }
}
EXPORT_SYMBOL_GPL(call_rcu);
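/*
 * Editorial usage sketch (not in the original source; struct foo and
 * foo_reclaim() are hypothetical names): call_rcu() is the asynchronous
 * counterpart of the synchronize_rcu() pattern above, with the callback
 * invoked from softirq context once a grace period has elapsed:
 *
 *        static void foo_reclaim(struct rcu_head *rhp)
 *        {
 *                kfree(container_of(rhp, struct foo, rh));
 *        }
 *
 *        // After making fp unreachable to new readers:
 *        call_rcu(&fp->rh, foo_reclaim);
 */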
/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
        rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
        return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
        unsigned long gp_seq = get_state_synchronize_rcu();

        if (unlikely(is_idle_task(current))) {
                /* force scheduling for rcu_qs() */
                resched_cpu(0);
        }
        return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
        return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
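/*
 * Editorial usage sketch (not in the original source): the polled API
 * lets callers avoid blocking in synchronize_rcu():
 *
 *        unsigned long cookie;
 *
 *        cookie = start_poll_synchronize_rcu();  // snapshot; ensure a GP
 *        ...
 *        if (poll_state_synchronize_rcu(cookie))
 *                ...;  // a full grace period has elapsed since the snapshot
 *        else
 *                ...;  // not yet; poll again later
 *
 * On Tiny RCU any change of ->gp_seq means a grace period has elapsed,
 * so the check above is a simple inequality rather than the
 * sequence-number arithmetic that Tree RCU uses.
 */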
#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
        if (head)
                kasan_record_aux_stack_noalloc(ptr);

        __kvfree_call_rcu(head, ptr);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
#endif
void __init rcu_init(void)
{
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
        rcu_early_boot_tests();
        tasks_cblist_init_generic();
}