/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
 * static_key_{true,false}() are also DEPRECATED. I.e., DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
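 *
 * For example, a minimal sketch (the key and function names below are made
 * up for illustration; they are not part of this API):
 *
 *	DEFINE_STATIC_KEY_FALSE(debug_stats_key);
 *
 *	void update_stats(void)
 *	{
 *		if (static_branch_unlikely(&debug_stats_key))
 *			do_debug_accounting();	// out of line; a NOP by default
 *		do_regular_work();
 *	}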
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we run-time modify the branch target via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
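 *
 * Continuing the sketch above (still with a made-up key name), runtime
 * control then looks like:
 *
 *	static_branch_enable(&debug_stats_key);		// NOP -> JMP, branch taken
 *	static_branch_disable(&debug_stats_key);	// JMP -> NOP, branch skipped
 *
 *	static_branch_inc(&debug_stats_key);		// count 0 -> 1, enables
 *	static_branch_inc(&debug_stats_key);		// count 1 -> 2, stays enabled
 *	static_branch_dec(&debug_stats_key);		// count 2 -> 1, stays enabled
 *	static_branch_dec(&debug_stats_key);		// count 1 -> 0, disables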
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
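 *
 * A rough sketch of the deferred form (the key name is made up; the deferred
 * helpers live in <linux/jump_label_ratelimit.h>):
 *
 *	static struct static_key_deferred user_ctl_key;
 *
 *	jump_label_rate_limit(&user_ctl_key, HZ);	// batch decrements
 *	static_key_slow_dec_deferred(&user_ctl_key);	// code patched later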
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/staging/static-keys.rst
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,		      \
				    "%s(): static key '%pS' used before call to jump_label_init()", \
				    __func__, (key))

struct static_key {
	atomic_t enabled;
#ifdef CONFIG_JUMP_LABEL
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
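 *
 * As an illustrative sketch (not a helper provided by this header), the
 * type bits can be masked off to recover the pointer:
 *
 *	unsigned long bits = key->type & JUMP_TYPE_MASK;
 *	void *ptr = (void *)(key->type & ~JUMP_TYPE_MASK);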
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
#endif	/* CONFIG_JUMP_LABEL */
};

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
	if (set)
		entry->key |= 2;
	else
		entry->key &= ~2;
}

static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
	return JUMP_LABEL_NOP_SIZE;
#else
	return arch_jump_entry_size(entry);
#endif
}

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_JUMP_LABEL */

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

#ifdef CONFIG_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_init_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern bool static_key_slow_inc(struct static_key *key);
extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() to catch any issues in
 * jump_label_init(); see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .type = JUMP_TYPE_FALSE } }

#else  /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static __always_inline int static_key_count(struct static_key *key)
{
	return raw_atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline void jump_label_init_ro(void) { }

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Prevent key->enabled getting negative to follow the same semantics
	 * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v < 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
	return true;
}
#define static_key_slow_inc(key)	static_key_fast_inc_not_disabled(key)

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All the below code is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name
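
/*
 * Illustrative sketch only (the key name and files below are made up, not
 * part of this header): a key is typically declared in a header and defined
 * in exactly one .c file, then tested and flipped through the wrappers:
 *
 *	// foo.h
 *	DECLARE_STATIC_KEY_FALSE(foo_feature_key);
 *
 *	// foo.c
 *	DEFINE_STATIC_KEY_FALSE(foo_feature_key);
 *
 *	void foo_set_feature(bool on)
 *	{
 *		if (on)
 *			static_branch_enable(&foo_feature_key);
 *		else
 *			static_branch_disable(&foo_feature_key);
 *	}
 */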

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}

#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)

#define _DEFINE_STATIC_KEY_RO_1(name)	DEFINE_STATIC_KEY_TRUE_RO(name)
#define _DEFINE_STATIC_KEY_RO_0(name)	DEFINE_STATIC_KEY_FALSE_RO(name)
#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)

#define _DECLARE_STATIC_KEY_1(name)	DECLARE_STATIC_KEY_TRUE(name)
#define _DECLARE_STATIC_KEY_0(name)	DECLARE_STATIC_KEY_FALSE(name)
#define DECLARE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})

#ifdef CONFIG_JUMP_LABEL

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 * type\branch| likely (1)            | unlikely (0)
 * -----------+-----------------------+------------------
 *  true (1)  |    ...                |    ...
 *            |    NOP                |    JMP L
 *            |    <br-stmts>         | 1: ...
 *            | L: ...                | L: <br-stmts>
 *            |                       |    jmp 1b
 * -----------+-----------------------+------------------
 *  false (0) |    ...                |    ...
 *            |    JMP L              |    NOP
 *            |    <br-stmts>         | 1: ...
 *            | L: ...                | L: <br-stmts>
 *            |                       |    jmp 1b
 * -----------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *	enabled	type	branch	  instruction
 * -----------------------------+-----------
 *	0	0	0	| NOP
 *	0	0	1	| JMP
 *	0	1	0	| NOP
 *	0	1	1	| JMP
 *	1	0	0	| JMP
 *	1	0	1	| NOP
 *	1	1	0	| JMP
 *	1	1	1	| NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */
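
/*
 * Worked example of the table above (a sketch, not additional API): a key
 * defined with DEFINE_STATIC_KEY_FALSE() (type = 0) and tested with
 * static_branch_likely() (branch = 1) is initially compiled as:
 *
 *	static:  instruction = type ^ branch = 0 ^ 1 = 1	(JMP to the out-of-line block)
 *
 * After static_key_enable() sets enabled = 1, the desired instruction is:
 *
 *	dynamic: instruction = enabled ^ branch = 1 ^ 1 = 0	(NOP)
 *
 * so the JMP is patched into a NOP and the likely path falls straight through.
 */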

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely_notrace(branch);							\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely_notrace(branch);						\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely_notrace(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely_notrace(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

#define static_branch_maybe(config, x)					\
	(IS_ENABLED(config) ? static_branch_likely(x)			\
			    : static_branch_unlikely(x))
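
/*
 * Illustrative sketch only (the Kconfig symbol and key name are made up):
 * DEFINE_STATIC_KEY_MAYBE() and static_branch_maybe() pick the TRUE/likely
 * flavour when the config option is enabled and the FALSE/unlikely flavour
 * otherwise, so the compiled-in default follows the Kconfig choice:
 *
 *	DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO_DEFAULT_ON, foo_key);
 *
 *	if (static_branch_maybe(CONFIG_FOO_DEFAULT_ON, &foo_key))
 *		do_foo();
 */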

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif	/* _LINUX_JUMP_LABEL_H */