Linux 4.13.16
[linux/fpc-iii.git] / kernel / rcu / srcutiny.c
blob: 1a1c1047d2edecaba7691d8c6f9fd3851c3c434a
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *	tiny version for non-preemptible single-CPU use.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->srcu_lock_nesting[0] = 0;
	sp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&sp->srcu_wq);
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	sp->srcu_gp_running = false;
	sp->srcu_gp_waiting = false;
	sp->srcu_idx = 0;
	INIT_WORK(&sp->srcu_work, srcu_drive_gp);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
	flush_work(&sp->srcu_work);
	WARN_ON(sp->srcu_gp_running);
	WARN_ON(sp->srcu_gp_waiting);
	WARN_ON(sp->srcu_cb_head);
	WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

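/*
 * Illustrative sketch, not part of this file: a minimal lifecycle for a
 * dynamically initialized SRCU domain, assuming a hypothetical module
 * that declares "static struct srcu_struct my_srcu".  Statically
 * allocated domains can instead be declared with DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU() from <linux/srcu.h>, typically without explicit
 * init/cleanup calls.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_module_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		// All readers gone and all callbacks invoked by this point.
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */
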
/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	int newval = sp->srcu_lock_nesting[idx] - 1;

	WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(sp->srcu_gp_waiting))
		swake_up(&sp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

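/*
 * Illustrative sketch, not part of this file: readers normally reach
 * __srcu_read_lock()/__srcu_read_unlock() through the srcu_read_lock()
 * and srcu_read_unlock() wrappers in <linux/srcu.h>.  Assuming a
 * hypothetical domain my_srcu protecting a pointer my_ptr, and a
 * hypothetical helper do_something_with():
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// may sleep, unlike plain RCU readers
 *	srcu_read_unlock(&my_srcu, idx);
 */
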
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *sp;

	sp = container_of(wp, struct srcu_struct, srcu_work);
	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(sp->srcu_gp_running, true);
	local_irq_disable();
	lh = sp->srcu_cb_head;
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	local_irq_enable();
	idx = sp->srcu_idx;
	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(sp->srcu_gp_running, false);
	if (READ_ONCE(sp->srcu_cb_head))
		schedule_work(&sp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);
	*sp->srcu_cb_tail = rhp;
	sp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	if (!READ_ONCE(sp->srcu_gp_running))
		schedule_work(&sp->srcu_work);
}
EXPORT_SYMBOL_GPL(call_srcu);

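/*
 * Illustrative sketch, not part of this file: a typical call_srcu() user
 * embeds an rcu_head in its own structure and frees the enclosing object
 * from the callback once a grace period has elapsed.  The struct foo,
 * foo_free_cb(), fp, and my_srcu names below are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// After unpublishing fp so that no new readers can find it:
 *	call_srcu(&my_srcu, &fp->rh, foo_free_cb);
 */
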
/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(sp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

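/*
 * Illustrative sketch, not part of this file: a typical synchronous
 * updater unpublishes an object, waits for all pre-existing SRCU readers
 * in the domain to drain, and only then frees it.  The my_srcu, my_ptr,
 * and my_lock names are hypothetical; note that synchronize_srcu() may
 * sleep, so the lock is dropped before waiting.
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_ptr, NULL);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// prior readers of old have finished
 *	kfree(old);
 */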