/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				   struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);
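/*
 * Illustrative sketch (editor's note, not part of the original header):
 * text_poke_early() is intended for early/boot-time patching, when no other
 * CPU can execute the bytes being rewritten, so the replacement can simply
 * be copied in place. A caller might look roughly like this; the buffer and
 * the 'instr' pointer are hypothetical:
 *
 *	u8 nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/- 5-byte NOP -/
 *	text_poke_early(instr, nop5, sizeof(nop5));
 */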
/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);
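/*
 * Illustrative sketch (editor's note, not from the original header):
 * text_poke_bp() patches a single live instruction by first writing an INT3
 * byte, letting poke_int3_handler() emulate the target instruction (the
 * 'emulate' argument, or the new opcode when that is NULL) for any CPU that
 * hits the breakpoint, and only then writing the remaining bytes. When many
 * sites are patched at once, text_poke_queue()/text_poke_finish() batch the
 * work so the costly cross-CPU synchronization is amortized. A hypothetical
 * caller, with site[]/insn[]/len[] standing in for the real bookkeeping:
 *
 *	for (i = 0; i < nr_sites; i++)
 *		text_poke_queue(site[i], insn[i], len[i], NULL);
 *	text_poke_finish();
 */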
#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4
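/*
 * Illustrative note (editor's addition): the rel32 forms (CALL, JMP32)
 * encode a 32-bit displacement relative to the address of the *next*
 * instruction:
 *
 *	disp = dest - (addr + CALL_INSN_SIZE);
 *
 * For example, a CALL at 0xffffffff81000000 targeting 0xffffffff81000100
 * encodes disp = 0x100 - 5 = 0xfb. text_gen_insn() below performs exactly
 * this computation.
 */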
static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)						\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch(opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	int size = text_opcode_size(opcode);

	insn.opcode = opcode;

	if (size > 1) {
		insn.disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
		}
	}

	return &insn.text;
}
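/*
 * Illustrative sketch (editor's note, not part of the original header): a
 * typical caller builds the replacement bytes with text_gen_insn() and hands
 * them to text_poke_bp(), so other CPUs never observe a half-written
 * instruction. The 'ip' and 'func' pointers below are hypothetical:
 *
 *	void *ip = ...;		/- site of an existing 5-byte CALL -/
 *	void *func = ...;	/- new call target -/
 *
 *	text_poke_bp(ip, text_gen_insn(CALL_INSN_OPCODE, ip, func),
 *		     CALL_INSN_SIZE, NULL);
 */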
extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}
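/*
 * Illustrative note (editor's addition): when a CPU traps on the INT3 placed
 * by text_poke_bp(), regs->ip points one byte past the breakpoint, i.e.
 * addr + INT3_INSN_SIZE. int3_emulate_call() therefore pushes
 * regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE, which is exactly the return
 * address (addr + 5) the real CALL would have pushed, before jumping to the
 * callee. An int3 handler might use it roughly like this, with 'target' a
 * hypothetical resolved destination:
 *
 *	case CALL_INSN_OPCODE:
 *		int3_emulate_call(regs, (unsigned long)target);
 *		break;
 */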
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */