// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>

int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG pr_fmt(fmt) "\n", ##args);		\
} while (0)

#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
		if (!(len))						\
			break;						\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);			\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

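/*
 * Illustrative sketch (not part of the original file): ideal_nops is
 * indexed by NOP length, so a hypothetical caller can fetch the ideal
 * encoding of an n-byte NOP for this CPU directly:
 */
#if 0
	u8 buf[5];

	/* Copy the ideal 5-byte NOP for this CPU into buf. */
	memcpy(buf, ideal_nops[5], 5);
#endif
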
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		fallthrough;

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

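/*
 * Illustrative sketch (hypothetical caller, not from the original file):
 * padding an 11-byte region. With ASM_NOP_MAX == 8, the loop above emits
 * one 8-byte ideal NOP followed by a 3-byte one.
 */
#if 0
	u8 buf[11];

	add_nops(buf, sizeof(buf));	/* 8-byte NOP + 3-byte NOP */
#endif
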
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}

static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insn_buff)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insn_buff + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insn_buff[0] = 0xeb;
	insn_buff[1] = (s8)n_dspl;
	add_nops(insn_buff + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insn_buff[0] = 0xe9;
	*(s32 *)&insn_buff[1] = n_dspl;

	repl_len = 5;

done:

	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}

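/*
 * Worked example (illustrative numbers, not from the original file):
 * suppose the recomputed displacement relative to orig_insn comes out as
 * n_dspl = 0x40. Since 0x40 - 2 <= 127 it fits in a rel8, so the 5-byte
 * JMP is shrunk to the 2-byte form and the remainder is padded:
 *
 *	insn_buff[0] = 0xeb;		JMP rel8
 *	insn_buff[1] = 0x3e;		0x40 - 2, counted from the insn end
 *	add_nops(insn_buff + 2, 3);	pad the remaining 3 bytes
 */
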
341 * "noinline" to cause control flow change and thus invalidate I$ and
342 * cause refetch after modification.
344 static void __init_or_module noinline
optimize_nops(struct alt_instr
*a
, u8
*instr
)
349 for (i
= 0; i
< a
->padlen
; i
++) {
350 if (instr
[i
] != 0x90)
354 local_irq_save(flags
);
355 add_nops(instr
+ (a
->instrlen
- a
->padlen
), a
->padlen
);
356 local_irq_restore(flags
);
358 DUMP_BYTES(instr
, a
->instrlen
, "%px: [%d:%d) optimized NOPs: ",
359 instr
, a
->instrlen
- a
->padlen
, a
->padlen
);
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have less capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insn_buff[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insn_buff_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insn_buff));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

403 DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
406 instr
, instr
, a
->instrlen
,
407 replacement
, a
->replacementlen
, a
->padlen
);
		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insn_buff, replacement, a->replacementlen);
		insn_buff_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insn_buff == 0xe8) {
			*(s32 *)(insn_buff + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insn_buff + 1),
				(unsigned long)instr + *(s32 *)(insn_buff + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insn_buff);

		if (a->instrlen > a->replacementlen) {
			add_nops(insn_buff + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insn_buff_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insn_buff, insn_buff_sz);
	}
}

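/*
 * Illustrative sketch (hypothetical patch site, not part of this file):
 * what a caller-side alternatives entry looks like. The ALTERNATIVE()
 * asm macro records the old/new instructions and the feature bit in the
 * .altinstructions section that apply_alternatives() walks above.
 */
#if 0
	/* Use CLFLUSHOPT when the CPU advertises it, plain CLFLUSH otherwise. */
	asm volatile(ALTERNATIVE("clflush %0", "clflushopt %0",
				 X86_FEATURE_CLFLUSHOPT)
		     : "+m" (*(volatile char *)addr));
#endif
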
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}

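/*
 * Illustrative sketch (simplified from <asm/alternative.h>): LOCK_PREFIX
 * emits the 0xf0 lock byte together with an entry in the .smp_locks
 * section, which is the table the two functions above walk.
 */
#if 0
#define LOCK_PREFIX_HERE			\
	".pushsection .smp_locks,\"a\"\n"	\
	".balign 4\n"				\
	".long 671f - .\n" /* offset */		\
	".popsection\n"				\
	"671:"

#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
#endif
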
struct smp_alt_module {
	/* the module owning these patch sites (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */

void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s\n",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}

void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}

/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insn_buff[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insn_buff, p->instr, p->len);
		used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insn_buff + used, p->len - used);
		text_poke_early(p->instr, insn_buff, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

/*
 * Self-test for the INT3 based CALL emulation code.
 *
 * This exercises int3_emulate_call() to make sure INT3 pt_regs are set up
 * properly and that there is a stack gap between the INT3 frame and the
 * previous context. Without this gap doing a virtual PUSH on the interrupted
 * stack would corrupt the INT3 IRET frame.
 *
 * See entry_{32,64}.S for more details.
 */

/*
 * We define the int3_magic() function in assembly to control the calling
 * convention such that we can 'call' it from assembly.
 */

extern void int3_magic(unsigned int *ptr); /* defined in asm */

asm (
"	.pushsection	.init.text, \"ax\", @progbits\n"
"	.type		int3_magic, @function\n"
"int3_magic:\n"
"	movl	$1, (%" _ASM_ARG1 ")\n"
"	ret\n"
"	.size		int3_magic, .-int3_magic\n"
"	.popsection\n"
);

extern __initdata unsigned long int3_selftest_ip; /* defined in asm below */

static int __init
int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!regs || user_mode(regs))
		return NOTIFY_DONE;

	if (val != DIE_INT3)
		return NOTIFY_DONE;

	if (regs->ip - INT3_INSN_SIZE != int3_selftest_ip)
		return NOTIFY_DONE;

	int3_emulate_call(regs, (unsigned long)&int3_magic);
	return NOTIFY_STOP;
}

static void __init int3_selftest(void)
{
	static __initdata struct notifier_block int3_exception_nb = {
		.notifier_call	= int3_exception_notify,
		.priority	= INT_MAX-1, /* last */
	};
	unsigned int val = 0;

	BUG_ON(register_die_notifier(&int3_exception_nb));

	/*
	 * Basically: int3_magic(&val); but really complicated :-)
	 *
	 * Stick the address of the INT3 instruction into int3_selftest_ip,
	 * then trigger the INT3, padded with NOPs to match a CALL instruction
	 * length.
	 */
	asm volatile ("1: int3; nop; nop; nop; nop\n\t"
		      ".pushsection .init.data,\"aw\"\n\t"
		      ".align " __ASM_SEL(4, 8) "\n\t"
		      ".type int3_selftest_ip, @object\n\t"
		      ".size int3_selftest_ip, " __ASM_SEL(4, 8) "\n\t"
		      "int3_selftest_ip:\n\t"
		      __ASM_SEL(.long, .quad) " 1b\n\t"
		      ".popsection\n\t"
		      : ASM_CALL_CONSTRAINT
		      : __ASM_SEL_RAW(a, D) (&val)
		      : "memory");

	BUG_ON(val != 1);

	unregister_die_notifier(&int3_exception_nb);
}

void __init alternative_instructions(void)
{
	int3_selftest();

	/*
	 * The patching is not fully atomic, so try to avoid local
	 * interruptions that might execute the to be patched code.
	 * Other CPUs are not running.
	 */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other CPUs are not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1) {
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	}
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI or
 * MCE handlers seeing an inconsistent instruction while you patch.
 */
void __init_or_module text_poke_early(void *addr, const void *opcode,
				      size_t len)
{
	unsigned long flags;

	if (boot_cpu_has(X86_FEATURE_NX) &&
	    is_module_text_address((unsigned long)addr)) {
		/*
		 * Module text is marked initially as non-executable, so the
		 * code cannot be running and speculative code-fetches are
		 * prevented. Just change the code.
		 */
		memcpy(addr, opcode, len);
	} else {
		local_irq_save(flags);
		memcpy(addr, opcode, len);
		local_irq_restore(flags);
		sync_core();

		/*
		 * Could also do a CLFLUSH here to speed up CPU recovery; but
		 * that causes hangs on some VIA CPUs.
		 */
	}
}

typedef struct {
	struct mm_struct *mm;
} temp_mm_state_t;

/*
 * Using a temporary mm allows setting temporary mappings that are not
 * accessible by other CPUs. Such mappings are needed to perform sensitive
 * memory writes that override the kernel memory protections (e.g., W^X),
 * without exposing the temporary page-table mappings that are required for
 * these write operations to other CPUs. Using a temporary mm also avoids
 * TLB shootdowns when the mapping is torn down.
 *
 * Context: The temporary mm needs to be used exclusively by a single core.
 *          To harden security, IRQs must be disabled while the temporary mm
 *          is loaded, thereby preventing interrupt handler bugs from
 *          overriding the kernel memory protection.
 */
static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
{
	temp_mm_state_t temp_state;

	lockdep_assert_irqs_disabled();

	/*
	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
	 * with a stale address space WITHOUT being in lazy mode after
	 * restoring the previous mm.
	 */
	if (this_cpu_read(cpu_tlbstate.is_lazy))
		leave_mm(smp_processor_id());

	temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	switch_mm_irqs_off(NULL, mm, current);

	/*
	 * If breakpoints are enabled, disable them while the temporary mm is
	 * used. Userspace might set up watchpoints on addresses that are used
	 * in the temporary mm, which would lead to wrong signals being sent or
	 * crashes.
	 *
	 * Note that breakpoints are not disabled selectively, which also causes
	 * kernel breakpoints (e.g., perf's) to be disabled. This might be
	 * undesirable, but still seems reasonable as the code that runs in the
	 * temporary mm should be short.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_disable();

	return temp_state;
}

static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(NULL, prev_state.mm, current);

	/*
	 * Restore the breakpoints if they were disabled before the temporary mm
	 * was loaded.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();
}

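/*
 * Illustrative sketch (condensed from __text_poke() below): the canonical
 * use pattern for the two helpers above.
 */
#if 0
	temp_mm_state_t prev;
	unsigned long flags;

	local_irq_save(flags);
	prev = use_temporary_mm(poking_mm);

	/* ... write through the temporary mapping at poking_addr ... */

	unuse_temporary_mm(prev);
	local_irq_restore(flags);
#endif
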
__ro_after_init struct mm_struct *poking_mm;
__ro_after_init unsigned long poking_addr;

static void *__text_poke(void *addr, const void *opcode, size_t len)
{
	bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
	struct page *pages[2] = {NULL};
	temp_mm_state_t prev;
	unsigned long flags;
	pte_t pte, *ptep;
	spinlock_t *ptl;
	pgprot_t pgprot;

	/*
	 * While boot memory allocator is running we cannot use struct pages as
	 * they are not yet initialized. There is no way to recover.
	 */
	BUG_ON(!after_bootmem);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		if (cross_page_boundary)
			pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		if (cross_page_boundary)
			pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	/*
	 * If something went wrong, crash and burn since recovery paths are not
	 * implemented.
	 */
	BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));

	/*
	 * Map the page without the global bit, as TLB flushing is done with
	 * flush_tlb_mm_range(), which is intended for non-global PTEs.
	 */
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);

	/*
	 * The lock is not really needed, but it avoids open-coding.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);

	/*
	 * This must not fail; preallocated in poking_init().
	 */
	VM_BUG_ON(!ptep);

	local_irq_save(flags);

	pte = mk_pte(pages[0], pgprot);
	set_pte_at(poking_mm, poking_addr, ptep, pte);

	if (cross_page_boundary) {
		pte = mk_pte(pages[1], pgprot);
		set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
	}

	/*
	 * Loading the temporary mm behaves as a compiler barrier, which
	 * guarantees that the PTE will be set at the time memcpy() is done.
	 */
	prev = use_temporary_mm(poking_mm);

	kasan_disable_current();
	memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
	kasan_enable_current();

	/*
	 * Ensure that the PTE is only cleared after the instructions of memcpy
	 * were issued by using a compiler barrier.
	 */
	barrier();

	pte_clear(poking_mm, poking_addr, ptep);
	if (cross_page_boundary)
		pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);

	/*
	 * Loading the previous page-table hierarchy requires a serializing
	 * instruction that already allows the core to see the updated version.
	 * Xen-PV is assumed to serialize execution in a similar manner.
	 */
	unuse_temporary_mm(prev);

	/*
	 * Flushing the TLB might involve IPIs, which would require enabled
	 * IRQs, but not if the mm is no longer in use, as is the case here.
	 */
	flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
			   (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
			   PAGE_SHIFT, false);

	/*
	 * If the text does not match what we just wrote then something is
	 * fundamentally screwy; there's nothing we can really do about that.
	 */
	BUG_ON(memcmp(addr, opcode, len));

	local_irq_restore(flags);
	pte_unmap_unlock(ptep, ptl);
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note that the caller must ensure that if the modified code is part of a
 * module, the module would not be removed during poking. This can be achieved
 * by registering a module notifier, and ordering module removal and patching
 * through a mutex.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	lockdep_assert_held(&text_mutex);

	return __text_poke(addr, opcode, len);
}

/**
 * text_poke_kgdb - Update instructions on a live kernel by kgdb
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Context: should only be used by kgdb, which ensures no other core is running,
 *          despite the fact it does not hold the text_mutex.
 */
void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
{
	return __text_poke(addr, opcode, len);
}

static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

struct text_poke_loc {
	s32 rel_addr; /* addr := _stext + rel_addr */
	s32 rel32;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
	u8 old;
};

struct bp_patching_desc {
	struct text_poke_loc *vec;
	int nr_entries;
	atomic_t refs;
};

static struct bp_patching_desc *bp_desc;

static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
	struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */

	if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
		return NULL;

	return desc;
}

static __always_inline void put_desc(struct bp_patching_desc *desc)
{
	smp_mb__before_atomic();
	arch_atomic_dec(&desc->refs);
}

static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
	return _stext + tp->rel_addr;
}

static __always_inline int patch_cmp(const void *key, const void *elt)
{
	struct text_poke_loc *tp = (struct text_poke_loc *) elt;

	if (key < text_poke_addr(tp))
		return -1;
	if (key > text_poke_addr(tp))
		return 1;
	return 0;
}

noinstr int poke_int3_handler(struct pt_regs *regs)
{
	struct bp_patching_desc *desc;
	struct text_poke_loc *tp;
	int len, ret = 0;
	void *ip;

	if (user_mode(regs))
		return 0;

	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_desc:
	 *
	 *	bp_desc = desc			INT3
	 *	WMB				RMB
	 *	write INT3			if (desc)
	 */
	smp_rmb();

	desc = try_get_desc(&bp_desc);
	if (!desc)
		return 0;

	/*
	 * Discount the INT3. See text_poke_bp_batch().
	 */
	ip = (void *) regs->ip - INT3_INSN_SIZE;

	/*
	 * Skip the binary search if there is a single member in the vector.
	 */
	if (unlikely(desc->nr_entries > 1)) {
		tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
				      sizeof(struct text_poke_loc),
				      patch_cmp);
		if (!tp)
			goto out_put;
	} else {
		tp = desc->vec;
		if (text_poke_addr(tp) != ip)
			goto out_put;
	}

	len = text_opcode_size(tp->opcode);
	ip += len;

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
		/*
		 * Someone poked an explicit INT3, they'll want to handle it,
		 * do not consume.
		 */
		goto out_put;

	case RET_INSN_OPCODE:
		int3_emulate_ret(regs);
		break;

	case CALL_INSN_OPCODE:
		int3_emulate_call(regs, (long)ip + tp->rel32);
		break;

	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		int3_emulate_jmp(regs, (long)ip + tp->rel32);
		break;

	default:
		BUG();
	}

	ret = 1;

out_put:
	put_desc(desc);
	return ret;
}

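/*
 * Illustrative sketch (assumption: simplified from <asm/text-patching.h>):
 * the int3_emulate_*() helpers used above only adjust the saved pt_regs;
 * the CPU then resumes as if the new instruction had already executed.
 */
#if 0
static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;			/* resume at the jump target */
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/* push the return address, then jump to the call target */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
#endif
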
#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

/**
 * text_poke_bp_batch() -- update instructions on live kernel on SMP
 * @tp:			vector of instructions to patch
 * @nr_entries:		number of entries in the vector
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- For each entry in the vector:
 *		- add a int3 trap to the address that will be patched
 *	- sync cores
 *	- For each entry in the vector:
 *		- update all but the first byte of the patched range
 *	- sync cores
 *	- For each entry in the vector:
 *		- replace the first byte (int3) by the first byte of
 *		  replacing opcode
 *	- sync cores
 */
static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries)
{
	struct bp_patching_desc desc = {
		.vec = tp,
		.nr_entries = nr_entries,
		.refs = ATOMIC_INIT(1),
	};
	unsigned char int3 = INT3_INSN_OPCODE;
	unsigned int i;
	int do_sync;

	lockdep_assert_held(&text_mutex);

	smp_store_release(&bp_desc, &desc); /* rcu_assign_pointer */

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * nr_entries and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	/*
	 * First step: add a int3 trap to the address that will be patched.
	 */
	for (i = 0; i < nr_entries; i++) {
		tp[i].old = *(u8 *)text_poke_addr(&tp[i]);
		text_poke(text_poke_addr(&tp[i]), &int3, INT3_INSN_SIZE);
	}

	text_poke_sync();

	/*
	 * Second step: update all but the first byte of the patched range.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
		int len = text_opcode_size(tp[i].opcode);

		if (len - INT3_INSN_SIZE > 0) {
			memcpy(old + INT3_INSN_SIZE,
			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
			       len - INT3_INSN_SIZE);
			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
				  (const char *)tp[i].text + INT3_INSN_SIZE,
				  len - INT3_INSN_SIZE);
			do_sync++;
		}

		/*
		 * Emit a perf event to record the text poke, primarily to
		 * support Intel PT decoding which must walk the executable code
		 * to reconstruct the trace. The flow up to here is:
		 *   - write INT3 byte
		 *   - IPI-SYNC
		 *   - write instruction tail
		 * At this point the actual control flow will be through the
		 * INT3 and handler and not hit the old or new instruction.
		 * Intel PT outputs FUP/TIP packets for the INT3, so the flow
		 * can still be decoded. Subsequently:
		 *   - emit RECORD_TEXT_POKE with the new instruction
		 *   - IPI-SYNC
		 *   - write first byte
		 *   - IPI-SYNC
		 * So before the text poke event timestamp, the decoder will see
		 * either the old instruction flow or FUP/TIP of INT3. After the
		 * text poke event timestamp, the decoder will see either the
		 * new instruction flow or FUP/TIP of INT3. Thus decoders can
		 * use the timestamp as the point at which to modify the
		 * instruction.
		 * The old instruction is recorded so that the event can be
		 * processed forwards or backwards.
		 */
		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
				     tp[i].text, len);
	}

	if (do_sync) {
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		text_poke_sync();
	}

	/*
	 * Third step: replace the first byte (int3) by the first byte of
	 * replacing opcode.
	 */
	for (do_sync = 0, i = 0; i < nr_entries; i++) {
		if (tp[i].text[0] == INT3_INSN_OPCODE)
			continue;

		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
		do_sync++;
	}

	if (do_sync)
		text_poke_sync();

	/*
	 * Remove and synchronize_rcu(), except we have a very primitive
	 * refcount based completion.
	 */
	WRITE_ONCE(bp_desc, NULL); /* RCU_INIT_POINTER */
	if (!atomic_dec_and_test(&desc.refs))
		atomic_cond_read_acquire(&desc.refs, !VAL);
}

static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate)
{
	struct insn insn;

	memcpy((void *)tp->text, opcode, len);
	if (!emulate)
		emulate = opcode;

	kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
	insn_get_length(&insn);

	BUG_ON(!insn_complete(&insn));
	BUG_ON(len != insn.length);

	tp->rel_addr = addr - (void *)_stext;
	tp->opcode = insn.opcode.bytes[0];

	switch (tp->opcode) {
	case INT3_INSN_OPCODE:
	case RET_INSN_OPCODE:
		break;

	case CALL_INSN_OPCODE:
	case JMP32_INSN_OPCODE:
	case JMP8_INSN_OPCODE:
		tp->rel32 = insn.immediate.value;
		break;

	default: /* assume NOP */
		switch (len) {
		case 2: /* NOP2 -- emulate as JMP8+0 */
			BUG_ON(memcmp(emulate, ideal_nops[len], len));
			tp->opcode = JMP8_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		case 5: /* NOP5 -- emulate as JMP32+0 */
			BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
			tp->opcode = JMP32_INSN_OPCODE;
			tp->rel32 = 0;
			break;

		default: /* unknown instruction */
			BUG();
		}
		break;
	}
}

/*
 * We rely hard on the tp_vec being ordered; ensure this is so by flushing
 * early if needed.
 */
static bool tp_order_fail(void *addr)
{
	struct text_poke_loc *tp;

	if (!tp_vec_nr)
		return false;

	if (!addr) /* force */
		return true;

	tp = &tp_vec[tp_vec_nr - 1];
	if ((unsigned long)text_poke_addr(tp) > (unsigned long)addr)
		return true;

	return false;
}

static void text_poke_flush(void *addr)
{
	if (tp_vec_nr == TP_VEC_MAX || tp_order_fail(addr)) {
		text_poke_bp_batch(tp_vec, tp_vec_nr);
		tp_vec_nr = 0;
	}
}

void text_poke_finish(void)
{
	text_poke_flush(NULL);
}

void __ref text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc *tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_flush(addr);

	tp = &tp_vec[tp_vec_nr++];
	text_poke_loc_init(tp, addr, opcode, len, emulate);
}

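/*
 * Illustrative sketch (hypothetical call sites): batching several pokes
 * so they share one set of INT3/sync-core rounds, as the jump-label
 * batching code does.
 */
#if 0
	mutex_lock(&text_mutex);
	text_poke_queue(site1, insn1, len1, NULL);
	text_poke_queue(site2, insn2, len2, NULL);
	text_poke_finish();	/* flush the queued vector in one batch */
	mutex_unlock(&text_mutex);
#endif
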
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @emulate:	instruction to be emulated
 *
 * Update a single instruction with the vector in the stack, avoiding
 * dynamically allocated memory. This function should be used when it is
 * not possible to allocate memory.
 */
void __ref text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate)
{
	struct text_poke_loc tp;

	if (unlikely(system_state == SYSTEM_BOOTING)) {
		text_poke_early(addr, opcode, len);
		return;
	}

	text_poke_loc_init(&tp, addr, opcode, len, emulate);
	text_poke_bp_batch(&tp, 1);
}

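/*
 * Illustrative sketch (hypothetical caller; real users typically build
 * the buffer with helpers such as text_gen_insn()): patching a 5-byte
 * NOP site into a CALL at runtime.
 */
#if 0
	u8 insn_buff[5];
	s32 disp = (long)target - (long)site - 5;	/* CALL rel32 */

	insn_buff[0] = CALL_INSN_OPCODE;		/* 0xe8 */
	memcpy(insn_buff + 1, &disp, sizeof(disp));

	mutex_lock(&text_mutex);
	text_poke_bp(site, insn_buff, sizeof(insn_buff), NULL);
	mutex_unlock(&text_mutex);
#endif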