drm/modes: Fix drm_mode_vrefresh() docs
[drm/drm-misc.git] / kernel / static_call_inline.c
blob5259cda486d058f4bb2f75c2cef3be2f6d8f3b57
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/init.h>
3 #include <linux/static_call.h>
4 #include <linux/bug.h>
5 #include <linux/smp.h>
6 #include <linux/sort.h>
7 #include <linux/slab.h>
8 #include <linux/module.h>
9 #include <linux/cpu.h>
10 #include <linux/processor.h>
11 #include <asm/sections.h>
/* Linker-provided bounds of the vmlinux static call site array. */
extern struct static_call_site __start_static_call_sites[],
			       __stop_static_call_sites[];
/* Linker-provided bounds of the trampoline -> key translation table. */
extern struct static_call_tramp_key __start_static_call_tramp_key[],
				    __stop_static_call_tramp_key[];

/* 0 = not initialized, 1 = initialized, >1 = re-init forced. */
static int static_call_initialized;
/*
 * Must be called before early_initcall() to be effective.
 *
 * Bumps static_call_initialized past 1 so that static_call_init()'s
 * "already initialized" check fails and all sites get re-patched.
 */
void static_call_force_reinit(void)
{
	if (WARN_ON_ONCE(!static_call_initialized))
		return;

	static_call_initialized++;
}
/* mutex to protect key modules/sites */
static DEFINE_MUTEX(static_call_mutex);

/* All key/site mutation below is serialized by static_call_mutex. */
static void static_call_lock(void)
{
	mutex_lock(&static_call_mutex);
}

static void static_call_unlock(void)
{
	mutex_unlock(&static_call_mutex);
}
44 static inline void *static_call_addr(struct static_call_site *site)
46 return (void *)((long)site->addr + (long)&site->addr);
49 static inline unsigned long __static_call_key(const struct static_call_site *site)
51 return (long)site->key + (long)&site->key;
54 static inline struct static_call_key *static_call_key(const struct static_call_site *site)
56 return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
59 /* These assume the key is word-aligned. */
60 static inline bool static_call_is_init(struct static_call_site *site)
62 return __static_call_key(site) & STATIC_CALL_SITE_INIT;
65 static inline bool static_call_is_tail(struct static_call_site *site)
67 return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
70 static inline void static_call_set_init(struct static_call_site *site)
72 site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
73 (long)&site->key;
/* sort() comparator: order sites by their (flag-masked) key pointer. */
static int static_call_site_cmp(const void *_a, const void *_b)
{
	const struct static_call_key *ka = static_call_key(_a);
	const struct static_call_key *kb = static_call_key(_b);

	return (ka > kb) - (ka < kb);
}
/*
 * sort() swap callback. ::addr and ::key store offsets relative to their own
 * storage location (see static_call_addr()/__static_call_key()), so when an
 * entry moves by @delta bytes the stored offsets must be rebased by @delta
 * to keep denoting the same absolute addresses.
 */
static void static_call_site_swap(void *_a, void *_b, int size)
{
	long delta = (unsigned long)_a - (unsigned long)_b;
	struct static_call_site *a = _a;
	struct static_call_site *b = _b;
	struct static_call_site tmp = *a;

	a->addr = b->addr  - delta;
	a->key  = b->key   - delta;

	b->addr = tmp.addr + delta;
	b->key  = tmp.key  + delta;
}
106 static inline void static_call_sort_entries(struct static_call_site *start,
107 struct static_call_site *stop)
109 sort(start, stop - start, sizeof(struct static_call_site),
110 static_call_site_cmp, static_call_site_swap);
113 static inline bool static_call_key_has_mods(struct static_call_key *key)
115 return !(key->type & 1);
118 static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
120 if (!static_call_key_has_mods(key))
121 return NULL;
123 return key->mods;
126 static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
128 if (static_call_key_has_mods(key))
129 return NULL;
131 return (struct static_call_site *)(key->type & ~1);
/*
 * __static_call_update - repoint a static call key and all its call sites
 * @key:   the key being updated
 * @tramp: the key's trampoline; patched first so indirect callers are correct
 * @func:  the new target function
 *
 * Serialized by static_call_mutex and cpus_read_lock() (text patching must
 * not race CPU hotplug). No-op when @func is already the current target.
 */
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
	struct static_call_site *site, *stop;
	struct static_call_mod *site_mod, first;

	cpus_read_lock();
	static_call_lock();

	if (key->func == func)
		goto done;

	key->func = func;

	/* Patch the trampoline first; unpatched sites still go through it. */
	arch_static_call_transform(NULL, tramp, func, false);

	/*
	 * If uninitialized, we'll not update the callsites, but they still
	 * point to the trampoline and we just patched that.
	 */
	if (WARN_ON_ONCE(!static_call_initialized))
		goto done;

	/*
	 * Synthesize a head entry for the sites stored directly in the key
	 * (the vmlinux !mod case of __static_call_init()).
	 */
	first = (struct static_call_mod){
		.next = static_call_key_next(key),
		.mod = NULL,
		.sites = static_call_key_sites(key),
	};

	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
		bool init = system_state < SYSTEM_RUNNING;
		struct module *mod = site_mod->mod;

		if (!site_mod->sites) {
			/*
			 * This can happen if the static call key is defined in
			 * a module which doesn't use it.
			 *
			 * It also happens in the has_mods case, where the
			 * 'first' entry has no sites associated with it.
			 */
			continue;
		}

		stop = __stop_static_call_sites;

		if (mod) {
#ifdef CONFIG_MODULES
			stop = mod->static_call_sites +
			       mod->num_static_call_sites;
			init = mod->state == MODULE_STATE_COMING;
#endif
		}

		/* Sites are sorted by key; stop at the first foreign key. */
		for (site = site_mod->sites;
		     site < stop && static_call_key(site) == key; site++) {
			void *site_addr = static_call_addr(site);

			/* __init sites are only patchable while init text lives. */
			if (!init && static_call_is_init(site))
				continue;

			if (!kernel_text_address((unsigned long)site_addr)) {
				/*
				 * This skips patching built-in __exit, which
				 * is part of init_section_contains() but is
				 * not part of kernel_text_address().
				 *
				 * Skipping built-in __exit is fine since it
				 * will never be executed.
				 */
				WARN_ONCE(!static_call_is_init(site),
					  "can't patch static call site at %pS",
					  site_addr);
				continue;
			}

			arch_static_call_transform(site_addr, NULL, func,
						   static_call_is_tail(site));
		}
	}

done:
	static_call_unlock();
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(__static_call_update);
/*
 * Initialize all static call sites in [@start, @stop) and link them to their
 * keys; @mod is NULL for vmlinux. Each site is patched to its key's current
 * target as it is processed.
 *
 * Returns 0 or -ENOMEM. On failure, later sites are left untouched — see the
 * bail-out comment in static_call_del_module().
 */
static int __static_call_init(struct module *mod,
			      struct static_call_site *start,
			      struct static_call_site *stop)
{
	struct static_call_site *site;
	struct static_call_key *key, *prev_key = NULL;
	struct static_call_mod *site_mod;

	if (start == stop)
		return 0;

	/* Group sites by key so each key owns one contiguous run. */
	static_call_sort_entries(start, stop);

	for (site = start; site < stop; site++) {
		void *site_addr = static_call_addr(site);

		/* Flag sites in init text; they vanish with the init sections. */
		if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
		    (!mod && init_section_contains(site_addr, 1)))
			static_call_set_init(site);

		key = static_call_key(site);
		if (key != prev_key) {
			prev_key = key;

			/*
			 * For vmlinux (!mod) avoid the allocation by storing
			 * the sites pointer in the key itself. Also see
			 * __static_call_update()'s @first.
			 *
			 * This allows architectures (eg. x86) to call
			 * static_call_init() before memory allocation works.
			 */
			if (!mod) {
				key->sites = site;
				key->type |= 1;
				goto do_transform;
			}

			site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
			if (!site_mod)
				return -ENOMEM;

			/*
			 * When the key has a direct sites pointer, extract
			 * that into an explicit struct static_call_mod, so we
			 * can have a list of modules.
			 */
			if (static_call_key_sites(key)) {
				site_mod->mod = NULL;
				site_mod->next = NULL;
				site_mod->sites = static_call_key_sites(key);

				key->mods = site_mod;

				site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
				if (!site_mod)
					return -ENOMEM;
			}

			/* Prepend this (mod, sites) entry to the key's list. */
			site_mod->mod = mod;
			site_mod->sites = site;
			site_mod->next = static_call_key_next(key);
			key->mods = site_mod;
		}

do_transform:
		arch_static_call_transform(site_addr, NULL, key->func,
					   static_call_is_tail(site));
	}

	return 0;
}
293 static int addr_conflict(struct static_call_site *site, void *start, void *end)
295 unsigned long addr = (unsigned long)static_call_addr(site);
297 if (addr <= (unsigned long)end &&
298 addr + CALL_INSN_SIZE > (unsigned long)start)
299 return 1;
301 return 0;
304 static int __static_call_text_reserved(struct static_call_site *iter_start,
305 struct static_call_site *iter_stop,
306 void *start, void *end, bool init)
308 struct static_call_site *iter = iter_start;
310 while (iter < iter_stop) {
311 if (init || !static_call_is_init(iter)) {
312 if (addr_conflict(iter, start, end))
313 return 1;
315 iter++;
318 return 0;
321 #ifdef CONFIG_MODULES
/*
 * Check whether [start, end] overlaps a static call site of the module
 * containing @start. Takes a module reference so the site array cannot
 * vanish while it is scanned.
 */
static int __static_call_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	/* The whole range is expected to live inside a single module. */
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	/* While COMING, the module's init-section sites still count. */
	ret = __static_call_text_reserved(mod->static_call_sites,
			mod->static_call_sites + mod->num_static_call_sites,
			start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}
347 static unsigned long tramp_key_lookup(unsigned long addr)
349 struct static_call_tramp_key *start = __start_static_call_tramp_key;
350 struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
351 struct static_call_tramp_key *tramp_key;
353 for (tramp_key = start; tramp_key != stop; tramp_key++) {
354 unsigned long tramp;
356 tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
357 if (tramp == addr)
358 return (long)tramp_key->key + (long)&tramp_key->key;
361 return 0;
/*
 * Fix up and initialize @mod's static call sites. Sites referencing a
 * non-exported key point at the key's trampoline instead; translate those
 * back to the real key before handing off to __static_call_init().
 */
static int static_call_add_module(struct module *mod)
{
	struct static_call_site *start = mod->static_call_sites;
	struct static_call_site *stop = start + mod->num_static_call_sites;
	struct static_call_site *site;

	for (site = start; site != stop; site++) {
		unsigned long s_key = __static_call_key(site);
		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
		unsigned long key;

		/*
		 * If the key is exported, 'addr' points to the key, which
		 * means modules are allowed to call static_call_update() on
		 * it.
		 *
		 * Otherwise, the key isn't exported, and 'addr' points to the
		 * trampoline so we need to lookup the key.
		 *
		 * We go through this dance to prevent crazy modules from
		 * abusing sensitive static calls.
		 */
		if (!kernel_text_address(addr))
			continue;

		key = tramp_key_lookup(addr);
		if (!key) {
			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
				static_call_addr(site));
			return -EINVAL;
		}

		/* Re-encode the site to reference the real key, keeping flags. */
		key |= s_key & STATIC_CALL_SITE_FLAGS;
		site->key = key - (long)&site->key;
	}

	return __static_call_init(mod, start, stop);
}
/*
 * Unlink @mod's entry from every static call key that has sites in the
 * module. Called under static_call_mutex (see static_call_module_notify()).
 */
static void static_call_del_module(struct module *mod)
{
	struct static_call_site *start = mod->static_call_sites;
	struct static_call_site *stop = mod->static_call_sites +
					mod->num_static_call_sites;
	struct static_call_key *key, *prev_key = NULL;
	struct static_call_mod *site_mod, **prev;
	struct static_call_site *site;

	for (site = start; site < stop; site++) {
		key = static_call_key(site);

		/*
		 * If the key was not updated due to a memory allocation
		 * failure in __static_call_init() then treating key::sites
		 * as key::mods in the code below would cause random memory
		 * access and #GP. In that case all subsequent sites have
		 * not been touched either, so stop iterating.
		 */
		if (!static_call_key_has_mods(key))
			break;

		/* Sites are sorted by key; one list surgery per key. */
		if (key == prev_key)
			continue;

		prev_key = key;

		/* Find @mod's entry, tracking the link that points to it. */
		for (prev = &key->mods, site_mod = key->mods;
		     site_mod && site_mod->mod != mod;
		     prev = &site_mod->next, site_mod = site_mod->next)
			;

		if (!site_mod)
			continue;

		*prev = site_mod->next;
		kfree(site_mod);
	}
}
/*
 * Module notifier: initialize a module's static call sites when it comes
 * up, and tear its per-key list entries down when it goes away (or when
 * setup fails partway).
 */
static int static_call_module_notify(struct notifier_block *nb,
				     unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	static_call_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = static_call_add_module(mod);
		if (ret) {
			pr_warn("Failed to allocate memory for static calls\n");
			/* Undo any partially-built per-key list entries. */
			static_call_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		static_call_del_module(mod);
		break;
	}

	static_call_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block static_call_module_nb = {
	.notifier_call = static_call_module_notify,
};
475 #else
/* Without CONFIG_MODULES no module can own static call sites. */
static inline int __static_call_mod_text_reserved(void *start, void *end)
{
	return 0;
}
482 #endif /* CONFIG_MODULES */
484 int static_call_text_reserved(void *start, void *end)
486 bool init = system_state < SYSTEM_RUNNING;
487 int ret = __static_call_text_reserved(__start_static_call_sites,
488 __stop_static_call_sites, start, end, init);
490 if (ret)
491 return ret;
493 return __static_call_mod_text_reserved(start, end);
/*
 * Early init: patch every vmlinux static call site to its key's current
 * target. Idempotent unless static_call_force_reinit() bumped the state
 * past 1.
 */
int __init static_call_init(void)
{
	int ret;

	/* See static_call_force_reinit(). */
	if (static_call_initialized == 1)
		return 0;

	cpus_read_lock();
	static_call_lock();
	ret = __static_call_init(NULL, __start_static_call_sites,
				 __stop_static_call_sites);
	static_call_unlock();
	cpus_read_unlock();

	if (ret) {
		/* The !mod path allocates nothing, so this should be impossible. */
		pr_err("Failed to allocate memory for static_call!\n");
		BUG();
	}

#ifdef CONFIG_MODULES
	/* Only register the notifier once, even on forced re-init. */
	if (!static_call_initialized)
		register_module_notifier(&static_call_module_nb);
#endif

	static_call_initialized = 1;
	return 0;
}
early_initcall(static_call_init);
526 #ifdef CONFIG_STATIC_CALL_SELFTEST
/* Selftest target #1: returns its argument plus one. */
static int func_a(int x)
{
	return 1 + x;
}
/* Selftest target #2: returns its argument plus two. */
static int func_b(int x)
{
	return 2 + x;
}
/* Selftest static call, initially targeting func_a. */
DEFINE_STATIC_CALL(sc_selftest, func_a);
/* Selftest vectors; a NULL ::func means "keep the current target". */
static struct static_call_data {
	int (*func)(int);	/* new target to install, or NULL */
	int val;		/* argument passed through the static call */
	int expect;		/* expected return value */
} static_call_data [] __initdata = {
	{ NULL, 2, 3 },		/* initial target func_a: 2 + 1 */
	{ func_b, 2, 4 },	/* retarget to func_b: 2 + 2 */
	{ func_a, 2, 3 }	/* and back to func_a: 2 + 1 */
};
/*
 * Walk the selftest vectors: optionally retarget sc_selftest, then verify
 * that a call through it returns the expected value.
 */
static int __init test_static_call_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
		struct static_call_data *scd = &static_call_data[i];

		if (scd->func)
			static_call_update(sc_selftest, scd->func);

		WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
	}

	return 0;
}
early_initcall(test_static_call_init);
567 #endif /* CONFIG_STATIC_CALL_SELFTEST */