// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jump_entry_key(jea) < jump_entry_key(jeb))
                return -1;

        if (jump_entry_key(jea) > jump_entry_key(jeb))
                return 1;

        return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
        long delta = (unsigned long)a - (unsigned long)b;
        struct jump_entry *jea = a;
        struct jump_entry *jeb = b;
        struct jump_entry tmp = *jea;

        jea->code   = jeb->code - delta;
        jea->target = jeb->target - delta;
        jea->key    = jeb->key - delta;

        jeb->code   = tmp.code + delta;
        jeb->target = tmp.target + delta;
        jeb->key    = tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;
        void *swapfn = NULL;

        if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
                swapfn = jump_label_swap;

        size = (((unsigned long)stop - (unsigned long)start)
                                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
        /*
         * -1 means the first static_key_slow_inc() is in progress.
         *  static_key_enabled() must return true, so return 1 here.
         */
        int n = atomic_read(&key->enabled);

        return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
        int v, v1;

        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        /*
         * Careful if we get concurrent static_key_slow_inc() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process.  At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         *
         * So give a special meaning to negative key->enabled: it sends
         * static_key_slow_inc() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update().  Note that
         * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
                if (likely(v1 == v))
                        return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * Ensure that if the above cmpxchg loop observes our positive
                 * value, it must also observe all the text changes.
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
        cpus_read_lock();
        static_key_slow_inc_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
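
/*
 * Usage sketch (illustrative only, not part of this file): callers
 * normally reach static_key_slow_inc() through the refcounted
 * static_branch_inc()/static_branch_dec() wrappers. The key and
 * function names below are hypothetical:
 *
 *	static DEFINE_STATIC_KEY_FALSE(tracing_key);
 *
 *	void handle_event(void)
 *	{
 *		if (static_branch_unlikely(&tracing_key))
 *			do_slow_tracing();	// out of line until enabled
 *	}
 *
 *	// paired at tracer registration/unregistration:
 *	static_branch_inc(&tracing_key);	// first inc patches the branch in
 *	static_branch_dec(&tracing_key);	// last dec patches it back out
 */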

void static_key_enable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) > 0) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
                return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * See static_key_slow_inc().
                 */
                atomic_set_release(&key->enabled, 1);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
        cpus_read_lock();
        static_key_enable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) != 1) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
                return;
        }

        jump_label_lock();
        if (atomic_cmpxchg(&key->enabled, 1, 0))
                jump_label_update(key);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
        cpus_read_lock();
        static_key_disable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
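
/*
 * Note on the two call styles above: static_key_enable()/_disable()
 * force the key to exactly 1 or 0 regardless of how often they are
 * called, while static_key_slow_inc()/_dec() keep a reference count;
 * mixing the two styles on one key trips the WARNs above. A minimal
 * sketch of the distinction (the key name is hypothetical):
 *
 *	static DEFINE_STATIC_KEY_FALSE(feature_key);
 *
 *	static_branch_enable(&feature_key);	// enabled: 0 -> 1
 *	static_branch_enable(&feature_key);	// already 1: harmless no-op
 *	static_branch_inc(&feature_key);	// enabled: 1 -> 2 (refcounted)
 *	static_branch_disable(&feature_key);	// count is 2, not 1: WARNs, no-op
 */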

static bool static_key_slow_try_dec(struct static_key *key)
{
        int val;

        val = atomic_fetch_add_unless(&key->enabled, -1, 1);
        if (val == 1)
                return false;

        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        WARN(val < 0, "jump label: negative count!\n");
        return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
        lockdep_assert_cpus_held();

        if (static_key_slow_try_dec(key))
                return;

        jump_label_lock();
        if (atomic_dec_and_test(&key->enabled))
                jump_label_update(key);
        jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
        cpus_read_lock();
        __static_key_slow_dec_cpuslocked(key);
        cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
                                    struct delayed_work *work,
                                    unsigned long timeout)
{
        STATIC_KEY_CHECK_USE(key);

        if (static_key_slow_try_dec(key))
                return;

        schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
        STATIC_KEY_CHECK_USE(key);
        flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
                           unsigned long rl)
{
        STATIC_KEY_CHECK_USE(key);
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
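
/*
 * Usage sketch for the rate-limited form (illustrative; the key name is
 * hypothetical). Deferring the dec side batches the expensive text
 * patching when a key toggles rapidly: only the final 1 -> 0 transition
 * is pushed through the workqueue after the timeout elapses.
 *
 *	static struct static_key_deferred sched_events;
 *
 *	jump_label_rate_limit(&sched_events, HZ / 4);	// once, at init
 *
 *	static_key_slow_inc(&sched_events.key);		// patches immediately
 *	static_key_slow_dec_deferred(&sched_events);	// disabling update runs
 *							// at most once per timeout
 */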

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (jump_entry_code(entry) <= (unsigned long)end &&
            jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
        return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
        return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
        key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
        key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
                                   struct jump_entry *entries)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->entries = entries;
        key->type |= type;
}
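
/*
 * To make the tagging above concrete (a sketch; the constants live in
 * linux/jump_label.h): with JUMP_TYPE_TRUE == 1UL, JUMP_TYPE_LINKED == 2UL
 * and JUMP_TYPE_MASK == 3UL, a key whose entries sit at an aligned address
 * such as 0xffffffff81e01000 and whose branch defaults to true stores
 * key->type == 0xffffffff81e01001. Masking with ~JUMP_TYPE_MASK recovers
 * the pointer; masking with JUMP_TYPE_MASK recovers the flag bits. This
 * only works because 'struct jump_entry' tables are word-aligned, leaving
 * the two low bits of the pointer free.
 */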

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
                 * An entry->code of 0 indicates an entry which has been
                 * disabled because it was in an init text area.
                 */
                if (init || !jump_entry_is_init(entry)) {
                        if (kernel_text_address(jump_entry_code(entry)))
                                arch_jump_label_transform(entry, jump_label_type(entry));
                        else
                                WARN_ONCE(1, "can't patch jump_label at %pS",
                                          (void *)jump_entry_code(entry));
                }
        }
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        /*
         * Since we are initializing the static_key.enabled field with
         * the 'raw' int values (to avoid pulling in atomic.h) in
         * jump_label.h, let's make sure that is safe. There are only two
         * cases to check since we initialize to 0 or 1.
         */
        BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
        BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

        if (static_key_initialized)
                return;

        cpus_read_lock();
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                if (init_section_contains((void *)jump_entry_code(iter), 1))
                        jump_entry_set_init(iter);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                static_key_set_entries(key, iter);
        }
        static_key_initialized = true;
        jump_label_unlock();
        cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
        WARN_ON_ONCE(!static_key_linked(key));
        return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
                               struct static_key_mod *mod)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->next = mod;
        key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        preempt_enable();

        if (!mod)
                return 0;

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = static_key_mod(key); mod; mod = mod->next) {
                struct jump_entry *stop;
                struct module *m;

                /*
                 * NULL if the static_key is defined in a module
                 * that does not use it
                 */
                if (!mod->entries)
                        continue;

                m = mod->mod;
                if (!m)
                        stop = __stop___jump_table;
                else
                        stop = m->jump_entries + m->num_jump_entries;
                __jump_label_update(key, mod->entries, stop,
                                    m && m->state == MODULE_STATE_COMING);
        }
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for arch_branch_static(). */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, *jlm2;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                if (within_module_init(jump_entry_code(iter), mod))
                        jump_entry_set_init(iter);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module((unsigned long)key, mod)) {
                        static_key_set_entries(key, iter);
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                if (!static_key_linked(key)) {
                        jlm2 = kzalloc(sizeof(struct static_key_mod),
                                       GFP_KERNEL);
                        if (!jlm2) {
                                kfree(jlm);
                                return -ENOMEM;
                        }
                        preempt_disable();
                        jlm2->mod = __module_address((unsigned long)key);
                        preempt_enable();
                        jlm2->entries = static_key_entries(key);
                        jlm2->next = NULL;
                        static_key_set_mod(key, jlm2);
                        static_key_set_linked(key);
                }
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = static_key_mod(key);
                static_key_set_mod(key, jlm);
                static_key_set_linked(key);

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop, true);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module((unsigned long)key, mod))
                        continue;

                /* No memory during module load */
                if (WARN_ON(!static_key_linked(key)))
                        continue;

                prev = &key->next;
                jlm = static_key_mod(key);

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                /* No memory during module load */
                if (WARN_ON(!jlm))
                        continue;

                if (prev == &key->next)
                        static_key_set_mod(key, jlm->next);
                else
                        *prev = jlm->next;

                kfree(jlm);

                jlm = static_key_mod(key);
                /* if only one entry is left, fold it back into the static_key */
                if (jlm->next == NULL) {
                        static_key_set_entries(key, jlm->entries);
                        static_key_clear_linked(key);
                        kfree(jlm);
                }
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        jump_label_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                jump_label_del_module(mod);
                break;
        }

        jump_label_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end overlap with
 * any of the jump label patch addresses. Code that wants to modify
 * kernel text should first verify that it does not overlap with any
 * of the jump label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}
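
/*
 * Illustrative caller pattern (hypothetical): a kernel text patcher,
 * e.g. a kprobe-style engine, would reject a target owned by a jump
 * label before writing to it:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + size))
 *		ret = -EBUSY;	// range overlaps a jump label site
 *	jump_label_unlock();
 */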

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry;
#ifdef CONFIG_MODULES
        struct module *mod;

        if (static_key_linked(key)) {
                __jump_label_mod_update(key);
                return;
        }

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
        preempt_enable();
#endif
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop,
                                    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */