1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_ARC_JUMP_LABEL_H
3 #define _ASM_ARC_JUMP_LABEL_H
7 #include <linux/stringify.h>
8 #include <linux/types.h>
10 #define JUMP_LABEL_NOP_SIZE 4
/*
 * NOTE about '.balign 4':
 *
 * To make atomic update of a patched instruction possible we need to
 * guarantee that the instruction doesn't cross an L1 cache line boundary.
 *
 * As of today we simply align the instruction which can be patched to
 * 4 bytes using the ".balign 4" directive. In that case the patched
 * instruction is aligned with one 16-bit NOP_S if this is required.
 *
 * However the 'align by 4' directive is much stricter than actually
 * required. It's enough that our 32-bit instruction doesn't cross an
 * L1 cache line boundary / L1 I$ fetch block boundary, which can be
 * achieved by using the ".bundle_align_mode" assembler directive. That
 * would save us from adding useless NOP_S padding in most cases.
 *
 * TODO: switch to the ".bundle_align_mode" directive when it is
 * supported by the ARC toolchain.
 */
31 static __always_inline
bool arch_static_branch(struct static_key
*key
,
34 asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE
)" \n"
37 ".pushsection __jump_table, \"aw\" \n"
38 ".word 1b, %l[l_yes], %c0 \n"
40 : : "i" (&((char *)key
)[branch
]) : : l_yes
);
47 static __always_inline
bool arch_static_branch_jump(struct static_key
*key
,
50 asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE
)" \n"
53 ".pushsection __jump_table, \"aw\" \n"
54 ".word 1b, %l[l_yes], %c0 \n"
56 : : "i" (&((char *)key
)[branch
]) : : l_yes
);
63 typedef u32 jump_label_t
;
71 #endif /* __ASSEMBLY__ */