/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>

#ifdef HAVE_JUMP_LABEL
/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}
static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}
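/*
 * Sorting the entry table by key address groups all entries that belong to
 * the same static_key into one contiguous run. This lets a static_key point
 * at its first entry and lets __jump_label_update() stop walking as soon as
 * it sees an entry owned by a different key.
 */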
static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's ok
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process. At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update(). Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
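/*
 * Lifecycle of key->enabled on the increment path (a summary of the logic
 * above): 0 means disabled; the first incrementer sets it to -1 while the
 * text is being patched and then releases it as 1; later incrementers either
 * cmpxchg a positive count upwards on the fast path, or wait on
 * jump_label_mutex and bump the count under the lock.
 */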
void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
					     unsigned long rate_limit,
					     struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
				  unsigned long rate_limit,
				  struct delayed_work *work)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key, rate_limit, work);
	cpus_read_unlock();
}
static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);
void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
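/*
 * Typical (sketched) use of a deferred key, with a caller-defined
 * 'static struct static_key_deferred my_key;' (hypothetical name):
 *
 *	jump_label_rate_limit(&my_key, HZ);
 *	static_key_slow_inc(&my_key.key);
 *	...
 *	static_key_slow_dec_deferred(&my_key);
 *
 * With the rate limit set, the final decrement is handed to a delayed work
 * item, so a decrement quickly followed by an increment need not patch the
 * text at all.
 */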
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}
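/*
 * Summary of the low-bit encoding used by the helpers above (the JUMP_TYPE_*
 * masks come from include/linux/jump_label.h):
 *
 *  - key->type bit 0 (JUMP_TYPE_TRUE): the key's initial true/false state.
 *  - key->type bit 1 (JUMP_TYPE_LINKED): the remaining bits point to a
 *    static_key_mod list rather than directly to a jump_entry table.
 *  - entry->key bit 0: the branch direction emitted at the call site; it is
 *    XORed with the key state to choose between NOP and JMP (see the table
 *    in include/linux/jump_label.h).
 */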
/*
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use an access
 * function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * An entry->code of 0 indicates an entry which has been
		 * disabled because it was in an init text area.
		 */
		if (entry->code) {
			if (kernel_text_address(entry->code))
				arch_jump_label_transform(entry, jump_label_type(entry));
			else
				WARN_ONCE(1, "can't patch jump_label at %pS",
					  (void *)entry->code);
		}
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

/* Disable any jump label entries in __init code */
void __init jump_label_invalidate_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (init_kernel_text(iter->code))
			iter->code = 0;
	}
}
#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/*
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	preempt_enable();

	if (!mod)
		return 0;

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}
static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop);
	}
}
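/*
 * Note for the loop above: each node covers the slice of one object's
 * __jump_table that belongs to this key, so the walk is bounded by that
 * object's table end (__stop___jump_table for the core kernel, or the
 * module's jump_entries + num_jump_entries); __jump_label_update() then
 * stops early at the first entry owned by another key.
 */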
/**
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

/* Disable any jump label entries in module init code */
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	case MODULE_STATE_LIVE:
		jump_label_invalidate_module_init(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};
static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */
/**
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end overlaps with
 * any of the jump label patch addresses. Code that wants to modify kernel
 * text should first verify that it does not overlap with any of the jump
 * label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
					__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}
#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */