/*
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>
#ifdef HAVE_JUMP_LABEL
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);
void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}
void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	return 0;
}
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}
static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}
static void jump_label_update(struct static_key *key);
/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's OK
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);
void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					   unsigned long rate_limit,
					   struct delayed_work *work)
{
	lockdep_assert_cpus_held();

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}
static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}
static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}
void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}
/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}
static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}
static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}
static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}
static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}
/*
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
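
/*
 * Illustrative layout of those low bits (values as defined in
 * linux/jump_label.h, noted here for reference):
 *
 *	bit 0: JUMP_TYPE_TRUE   - initial branch direction
 *	bit 1: JUMP_TYPE_LINKED - pointer is a 'struct static_key_mod' list
 *	rest : the entries/next pointer itself
 */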
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (init || !jump_entry_is_init(entry)) {
			if (kernel_text_address(jump_entry_code(entry)))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)jump_entry_code(entry));
		}
	}
}
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}
#ifdef CONFIG_MODULES
static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}
/*
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}
static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}
static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}
/*
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}
static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};
static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);
#endif /* CONFIG_MODULES */
/*
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
					__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
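
/*
 * Illustrative sketch, not part of this file: a hypothetical text-patching
 * caller checks for conflicts with jump label sites before rewriting an
 * instruction range (callers such as kprobes do this when registering):
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(addr, addr + len - 1))
 *		ret = -EBUSY;
 *	else
 *		ret = patch_my_text(addr, len);		// hypothetical
 *	jump_label_unlock();
 */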
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}
#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
#endif /* HAVE_JUMP_LABEL */