/*
 * shadow.c - Shadow Variables
 *
 * Copyright (C) 2014 Josh Poimboeuf <jpoimboe@redhat.com>
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2017 Joe Lawrence <joe.lawrence@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/**
 * DOC: Shadow variable API concurrency notes:
 *
 * The shadow variable API provides a simple relationship between an
 * <obj, id> pair and a pointer value. It is the responsibility of the
 * caller to provide any mutual exclusion required of the shadow data.
 *
 * Once a shadow variable is attached to its parent object via the
 * klp_shadow_*alloc() API calls, it is considered live: any subsequent
 * call to klp_shadow_get() may then return the shadow variable's data
 * pointer. Callers of klp_shadow_*alloc() should prepare shadow data
 * accordingly.
 *
 * The klp_shadow_*alloc() API calls may allocate memory for new shadow
 * variable structures. Their implementation does not call kmalloc
 * inside any spinlocks, but API callers should pass GFP flags according
 * to their specific needs.
 *
 * The klp_shadow_hash is an RCU-enabled hashtable and is safe against
 * concurrent klp_shadow_free() and klp_shadow_get() operations.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/livepatch.h>
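
/*
 * Illustrative sketch, not part of the upstream file: the API described in
 * the concurrency notes above, in code. A shadow variable attaches extra
 * data to an existing object without changing that object's struct layout.
 * "struct example_device" and EXAMPLE_SHADOW_COUNT are hypothetical names
 * used only for this sketch.
 */
struct example_device {
	int id;
};

#define EXAMPLE_SHADOW_COUNT	1UL

static void __maybe_unused example_shadow_lifecycle(struct example_device *dev)
{
	unsigned long *count;

	/* Attach a zero-filled counter to @dev; it is live from here on. */
	count = klp_shadow_alloc(dev, EXAMPLE_SHADOW_COUNT, sizeof(*count),
				 GFP_KERNEL, NULL, NULL);
	if (!count)
		return;

	/* Any code holding @dev can look the counter up by <obj, id>. */
	count = klp_shadow_get(dev, EXAMPLE_SHADOW_COUNT);
	if (count)
		(*count)++;	/* callers provide their own mutual exclusion */

	/* Detach and free the shadow variable when @dev goes away. */
	klp_shadow_free(dev, EXAMPLE_SHADOW_COUNT, NULL);
}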

static DEFINE_HASHTABLE(klp_shadow_hash, 12);

/*
 * klp_shadow_lock provides exclusive access to the klp_shadow_hash and
 * the shadow variables it references.
 */
static DEFINE_SPINLOCK(klp_shadow_lock);

/**
 * struct klp_shadow - shadow variable structure
 * @node:	klp_shadow_hash hash table node
 * @rcu_head:	RCU is used to safely free this structure
 * @obj:	pointer to parent object
 * @id:		data identifier
 * @data:	data area
 */
struct klp_shadow {
	struct hlist_node node;
	struct rcu_head rcu_head;
	void *obj;
	unsigned long id;
	char data[];
};

/**
 * klp_shadow_match() - verify a shadow variable matches given <obj, id>
 * @shadow:	shadow variable to match
 * @obj:	pointer to parent object
 * @id:		data identifier
 *
 * Return: true if the shadow variable matches.
 */
static inline bool klp_shadow_match(struct klp_shadow *shadow, void *obj,
				unsigned long id)
{
	return shadow->obj == obj && shadow->id == id;
}

/**
 * klp_shadow_get() - retrieve a shadow variable data pointer
 * @obj:	pointer to parent object
 * @id:		data identifier
 *
 * Return: the shadow variable data element, NULL on failure.
 */
void *klp_shadow_get(void *obj, unsigned long id)
{
	struct klp_shadow *shadow;

	rcu_read_lock();

	hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,
				   (unsigned long)obj) {

		if (klp_shadow_match(shadow, obj, id)) {
			rcu_read_unlock();
			return shadow->data;
		}
	}

	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(klp_shadow_get);
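
/*
 * Illustrative sketch, not part of the upstream file: a typical read side.
 * A livepatched function must tolerate a NULL return, since objects created
 * before the shadow variable was attached simply have no shadow data.
 * "struct example_request" and EXAMPLE_SHADOW_FLAG are hypothetical.
 */
struct example_request {
	int status;
};

#define EXAMPLE_SHADOW_FLAG	2UL

static bool __maybe_unused example_request_flagged(struct example_request *req)
{
	bool *flagged = klp_shadow_get(req, EXAMPLE_SHADOW_FLAG);

	/* No shadow data attached yet: fall back to the old behaviour. */
	return flagged ? *flagged : false;
}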

static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id,
				       size_t size, gfp_t gfp_flags,
				       klp_shadow_ctor_t ctor, void *ctor_data,
				       bool warn_on_exist)
{
	struct klp_shadow *new_shadow;
	void *shadow_data;
	unsigned long flags;

	/* Check if the shadow variable already exists */
	shadow_data = klp_shadow_get(obj, id);
	if (shadow_data)
		goto exists;

	/*
	 * Allocate a new shadow variable. Fill it with zeroes by default.
	 * More complex setting can be done by @ctor function. But it is
	 * called only when the buffer is really used (under klp_shadow_lock).
	 */
	new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
	if (!new_shadow)
		return NULL;

	/* Look for <obj, id> again under the lock */
	spin_lock_irqsave(&klp_shadow_lock, flags);
	shadow_data = klp_shadow_get(obj, id);
	if (unlikely(shadow_data)) {
		/*
		 * Shadow variable was found, throw away speculative
		 * allocation.
		 */
		spin_unlock_irqrestore(&klp_shadow_lock, flags);
		kfree(new_shadow);
		goto exists;
	}

	new_shadow->obj = obj;
	new_shadow->id = id;

	if (ctor) {
		int err;

		err = ctor(obj, new_shadow->data, ctor_data);
		if (err) {
			spin_unlock_irqrestore(&klp_shadow_lock, flags);
			kfree(new_shadow);
			pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n",
			       obj, id, err);
			return NULL;
		}
	}

	/* No <obj, id> found, so attach the newly allocated one */
	hash_add_rcu(klp_shadow_hash, &new_shadow->node,
		     (unsigned long)new_shadow->obj);
	spin_unlock_irqrestore(&klp_shadow_lock, flags);

	return new_shadow->data;

exists:
	if (warn_on_exist) {
		WARN(1, "Duplicate shadow variable <%p, %lx>\n", obj, id);
		return NULL;
	}

	return shadow_data;
}

/**
 * klp_shadow_alloc() - allocate and add a new shadow variable
 * @obj:	pointer to parent object
 * @id:		data identifier
 * @size:	size of attached data
 * @gfp_flags:	GFP mask for allocation
 * @ctor:	custom constructor to initialize the shadow data (optional)
 * @ctor_data:	pointer to any data needed by @ctor (optional)
 *
 * Allocates @size bytes for new shadow variable data using @gfp_flags.
 * The data are zeroed by default. They are further initialized by @ctor
 * function if it is not NULL. The new shadow variable is then added
 * to the global hashtable.
 *
 * If an existing <obj, id> shadow variable can be found, this routine will
 * issue a WARN, exit early and return NULL.
 *
 * This function guarantees that the constructor function is called only when
 * the variable did not exist before. The cost is that @ctor is called
 * in atomic context under a spin lock.
 *
 * Return: the shadow variable data element, NULL on duplicate or
 * failure.
 */
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data)
{
	return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
					 ctor, ctor_data, true);
}
EXPORT_SYMBOL_GPL(klp_shadow_alloc);
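
/*
 * Illustrative sketch, not part of the upstream file: allocating shadow data
 * with a constructor. The buffer is zeroed before @ctor runs, and @ctor is
 * called under klp_shadow_lock, so it must not sleep. "struct example_conn",
 * EXAMPLE_SHADOW_LOCK and example_lock_ctor() are hypothetical names.
 */
struct example_conn {
	int fd;
};

#define EXAMPLE_SHADOW_LOCK	3UL

static int example_lock_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	spinlock_t *lock = shadow_data;

	spin_lock_init(lock);
	return 0;
}

static int __maybe_unused example_conn_init(struct example_conn *conn)
{
	spinlock_t *lock;

	/*
	 * A second klp_shadow_alloc() for the same <conn, id> would WARN and
	 * return NULL, so this is only done when @conn is first created.
	 */
	lock = klp_shadow_alloc(conn, EXAMPLE_SHADOW_LOCK, sizeof(*lock),
				GFP_KERNEL, example_lock_ctor, NULL);
	return lock ? 0 : -ENOMEM;
}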

/**
 * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
 * @obj:	pointer to parent object
 * @id:		data identifier
 * @size:	size of attached data
 * @gfp_flags:	GFP mask for allocation
 * @ctor:	custom constructor to initialize the shadow data (optional)
 * @ctor_data:	pointer to any data needed by @ctor (optional)
 *
 * Returns a pointer to existing shadow data if an <obj, id> shadow
 * variable is already present. Otherwise, it creates a new shadow
 * variable like klp_shadow_alloc().
 *
 * This function guarantees that only one shadow variable exists with the given
 * @id for the given @obj. It also guarantees that the constructor function
 * will be called only when the variable did not exist before. The cost is
 * that @ctor is called in atomic context under a spin lock.
 *
 * Return: the shadow variable data element, NULL on failure.
 */
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data)
{
	return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
					 ctor, ctor_data, false);
}
EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);
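
/*
 * Illustrative sketch, not part of the upstream file: the in-flight object
 * case klp_shadow_get_or_alloc() is meant for. A livepatched function may
 * see objects that were allocated before the patch was applied, so the
 * shadow data is attached lazily on first use. GFP_ATOMIC is only an
 * example flag; EXAMPLE_SHADOW_REFS and example_take_ref() are hypothetical.
 */
#define EXAMPLE_SHADOW_REFS	4UL

static void __maybe_unused example_take_ref(void *obj)
{
	unsigned long *refs;

	refs = klp_shadow_get_or_alloc(obj, EXAMPLE_SHADOW_REFS, sizeof(*refs),
				       GFP_ATOMIC, NULL, NULL);
	/* As noted above, callers serialize access to the data themselves. */
	if (refs)
		(*refs)++;
}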

static void klp_shadow_free_struct(struct klp_shadow *shadow,
				   klp_shadow_dtor_t dtor)
{
	hash_del_rcu(&shadow->node);
	if (dtor)
		dtor(shadow->obj, shadow->data);
	kfree_rcu(shadow, rcu_head);
}

/**
 * klp_shadow_free() - detach and free a <obj, id> shadow variable
 * @obj:	pointer to parent object
 * @id:		data identifier
 * @dtor:	custom callback that can be used to unregister the variable
 *		and/or free data that the shadow variable points to (optional)
 *
 * This function releases the memory for this <obj, id> shadow variable
 * instance, callers should stop referencing it accordingly.
 */
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
{
	struct klp_shadow *shadow;
	unsigned long flags;

	spin_lock_irqsave(&klp_shadow_lock, flags);

	/* Delete <obj, id> from hash */
	hash_for_each_possible(klp_shadow_hash, shadow, node,
			       (unsigned long)obj) {

		if (klp_shadow_match(shadow, obj, id)) {
			klp_shadow_free_struct(shadow, dtor);
			break;
		}
	}

	spin_unlock_irqrestore(&klp_shadow_lock, flags);
}
EXPORT_SYMBOL_GPL(klp_shadow_free);
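
/*
 * Illustrative sketch, not part of the upstream file: freeing shadow data
 * whose payload is itself a pointer to separately allocated memory. The
 * dtor runs under klp_shadow_lock, before the shadow structure is handed to
 * kfree_rcu(). EXAMPLE_SHADOW_BUF and the functions below are hypothetical.
 */
#define EXAMPLE_SHADOW_BUF	5UL

static void example_buf_dtor(void *obj, void *shadow_data)
{
	void **buf = shadow_data;

	kfree(*buf);
}

static void __maybe_unused example_object_teardown(void *obj)
{
	klp_shadow_free(obj, EXAMPLE_SHADOW_BUF, example_buf_dtor);
}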

/**
 * klp_shadow_free_all() - detach and free all <*, id> shadow variables
 * @id:		data identifier
 * @dtor:	custom callback that can be used to unregister the variable
 *		and/or free data that the shadow variable points to (optional)
 *
 * This function releases the memory for all <*, id> shadow variable
 * instances, callers should stop referencing them accordingly.
 */
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
	struct klp_shadow *shadow;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&klp_shadow_lock, flags);

	/* Delete all <*, id> from hash */
	hash_for_each(klp_shadow_hash, i, shadow, node) {
		if (klp_shadow_match(shadow, shadow->obj, id))
			klp_shadow_free_struct(shadow, dtor);
	}

	spin_unlock_irqrestore(&klp_shadow_lock, flags);
}
EXPORT_SYMBOL_GPL(klp_shadow_free_all);
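
/*
 * Illustrative sketch, not part of the upstream file: bulk cleanup, for
 * example from a livepatch module's exit path or a post-unpatch callback,
 * detaching every <*, id> instance regardless of which object it belongs
 * to. EXAMPLE_SHADOW_STATE and example_unpatch_cleanup() are hypothetical.
 */
#define EXAMPLE_SHADOW_STATE	6UL

static void __maybe_unused example_unpatch_cleanup(void)
{
	klp_shadow_free_all(EXAMPLE_SHADOW_STATE, NULL);
}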