#define pr_fmt(fmt) "SMP alternatives: " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
int __read_mostly alternatives_patched;

EXPORT_SYMBOL_GPL(alternatives_patched);

#define MAX_PATCH_LEN (255-1)
static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)
#define DUMP_BYTES(buf, len, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int j;							\
									\
		if (!(len))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (j = 0; j < (len) - 1; j++)				\
			printk(KERN_CONT "%02hhx ", buf[j]);		\
		printk(KERN_CONT "%02hhx\n", buf[j]);			\
	}								\
} while (0)
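
/*
 * Illustrative only: with debug-alternative enabled, a call such as
 * DUMP_BYTES(insnbuf, 3, "%px: final_insn: ", instr) logs one line
 * along the lines of "ffffffff81000100: final_insn: 0f 1f 00".
 */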
/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
	GENERIC_NOP1,
	GENERIC_NOP2,
	GENERIC_NOP3,
	GENERIC_NOP4,
	GENERIC_NOP5,
	GENERIC_NOP6,
	GENERIC_NOP7,
	GENERIC_NOP8,
	GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
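
/*
 * Indexing example: intel_nops[5] points at GENERIC_NOP5, because the
 * offsets 1 + 2 + 3 + 4 skip the four shorter nops stored before it.
 * Thus *_nops[n] always yields an n-byte nop for n <= ASM_NOP_MAX, and
 * index ASM_NOP_MAX+1 is the trailing 5-byte atomic nop.
 */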
#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
	K8_NOP1,
	K8_NOP2,
	K8_NOP3,
	K8_NOP4,
	K8_NOP5,
	K8_NOP6,
	K8_NOP7,
	K8_NOP8,
	K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
	K7_NOP1,
	K7_NOP2,
	K7_NOP3,
	K7_NOP4,
	K7_NOP5,
	K7_NOP6,
	K7_NOP7,
	K7_NOP8,
	K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
#ifdef P6_NOP1
static const unsigned char p6nops[] =
{
	P6_NOP1,
	P6_NOP2,
	P6_NOP3,
	P6_NOP4,
	P6_NOP5,
	P6_NOP6,
	P6_NOP7,
	P6_NOP8,
	P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif
/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif
void __init arch_init_ideal_nops(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		/*
		 * Due to a decoder implementation quirk, some
		 * specific Intel CPUs actually perform better with
		 * the "k8_nops" than with the SDM-recommended NOPs.
		 */
		if (boot_cpu_data.x86 == 6 &&
		    boot_cpu_data.x86_model >= 0x0f &&
		    boot_cpu_data.x86_model != 0x1c &&
		    boot_cpu_data.x86_model != 0x26 &&
		    boot_cpu_data.x86_model != 0x27 &&
		    boot_cpu_data.x86_model < 0x30) {
			ideal_nops = k8_nops;
		} else if (boot_cpu_has(X86_FEATURE_NOPL)) {
			ideal_nops = p6_nops;
		} else {
#ifdef CONFIG_X86_64
			ideal_nops = k8_nops;
#else
			ideal_nops = intel_nops;
#endif
		}
		break;

	case X86_VENDOR_HYGON:
		ideal_nops = p6_nops;
		return;

	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 > 0xf) {
			ideal_nops = p6_nops;
			return;
		}

		/* fall through */

	default:
#ifdef CONFIG_X86_64
		ideal_nops = k8_nops;
#else
		if (boot_cpu_has(X86_FEATURE_K8))
			ideal_nops = k8_nops;
		else if (boot_cpu_has(X86_FEATURE_K7))
			ideal_nops = k7_nops;
		else
			ideal_nops = intel_nops;
#endif
	}
}
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	while (len > 0) {
		unsigned int noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, ideal_nops[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
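
/*
 * For instance, padding a 10-byte hole with ASM_NOP_MAX == 8 copies one
 * 8-byte nop followed by one 2-byte nop; callers then publish the whole
 * buffer with text_poke()/text_poke_early() rather than patching nops
 * one at a time.
 */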
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
void *text_poke_early(void *addr, const void *opcode, size_t len);
/*
 * Are we looking at a near JMP with a 1 or 4-byte displacement?
 * (0xeb is JMP rel8, 0xe9 is JMP rel32.)
 */
static inline bool is_jmp(const u8 opcode)
{
	return opcode == 0xeb || opcode == 0xe9;
}
static void __init_or_module
recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
{
	u8 *next_rip, *tgt_rip;
	s32 n_dspl, o_dspl;
	int repl_len;

	if (a->replacementlen != 5)
		return;

	o_dspl = *(s32 *)(insnbuf + 1);

	/* next_rip of the replacement JMP */
	next_rip = repl_insn + a->replacementlen;
	/* target rip of the replacement JMP */
	tgt_rip  = next_rip + o_dspl;
	n_dspl = tgt_rip - orig_insn;

	DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);

	if (tgt_rip - orig_insn >= 0) {
		if (n_dspl - 2 <= 127)
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	/* negative offset */
	} else {
		if (((n_dspl - 2) & 0xff) == (n_dspl - 2))
			goto two_byte_jmp;
		else
			goto five_byte_jmp;
	}

two_byte_jmp:
	n_dspl -= 2;

	insnbuf[0] = 0xeb;
	insnbuf[1] = (s8)n_dspl;
	add_nops(insnbuf + 2, 3);

	repl_len = 2;
	goto done;

five_byte_jmp:
	n_dspl -= 5;

	insnbuf[0] = 0xe9;
	*(s32 *)&insnbuf[1] = n_dspl;

	repl_len = 5;

done:
	DPRINTK("final displ: 0x%08x, JMP 0x%lx",
		n_dspl, (unsigned long)orig_insn + n_dspl + repl_len);
}
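
/*
 * Worked example (illustrative): a replacement JMP whose target ends up
 * 16 bytes past the original site gives n_dspl == 16; since 16 - 2 fits
 * in a signed byte, it is emitted as the two-byte "eb 0e" followed by a
 * 3-byte nop, instead of the five-byte "e9" form.
 */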
335 * "noinline" to cause control flow change and thus invalidate I$ and
336 * cause refetch after modification.
338 static void __init_or_module noinline
optimize_nops(struct alt_instr
*a
, u8
*instr
)
343 for (i
= 0; i
< a
->padlen
; i
++) {
344 if (instr
[i
] != 0x90)
348 local_irq_save(flags
);
349 add_nops(instr
+ (a
->instrlen
- a
->padlen
), a
->padlen
);
350 local_irq_restore(flags
);
352 DUMP_BYTES(instr
, a
->instrlen
, "%px: [%d:%d) optimized NOPs: ",
353 instr
, a
->instrlen
- a
->padlen
, a
->padlen
);
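
/*
 * The padding emitted by the ALTERNATIVE() macro is a run of single-byte
 * 0x90 nops; rewriting it via add_nops() collapses that run into the
 * fewest (ideally one) long nops for this CPU, which decode more cheaply.
 */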
/*
 * Replace instructions with better alternatives for this CPU type. This runs
 * before SMP is initialized to avoid SMP problems with self modifying code.
 * This implies that asymmetric systems where APs have fewer capabilities than
 * the boot processor are not handled. Tough. Make sure you disable such
 * features by hand.
 *
 * Marked "noinline" to cause control flow change and thus insn cache
 * to refetch changed I$ lines.
 */
void __init_or_module noinline apply_alternatives(struct alt_instr *start,
						  struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr, *replacement;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("alt table %px, -> %px", start, end);
	/*
	 * The scan order should be from start to end. A later scanned
	 * alternative code can overwrite previously scanned alternative code.
	 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
	 * patch code.
	 *
	 * So be careful if you want to change the scan order to any other
	 * order.
	 */
	for (a = start; a < end; a++) {
		int insnbuf_sz = 0;

		instr = (u8 *)&a->instr_offset + a->instr_offset;
		replacement = (u8 *)&a->repl_offset + a->repl_offset;
		BUG_ON(a->instrlen > sizeof(insnbuf));
		BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
		if (!boot_cpu_has(a->cpuid)) {
			if (a->padlen > 1)
				optimize_nops(a, instr);

			continue;
		}

		DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
			a->cpuid >> 5,
			a->cpuid & 0x1f,
			instr, instr, a->instrlen,
			replacement, a->replacementlen, a->padlen);

		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);

		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

		/*
		 * 0xe8 is a relative CALL; fix the offset.
		 *
		 * Instruction length is checked before the opcode to avoid
		 * accessing uninitialized bytes for zero-length replacements.
		 */
		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),
				(unsigned long)instr + *(s32 *)(insnbuf + 1) + 5);
		}

		if (a->replacementlen && is_jmp(replacement[0]))
			recompute_jump(a, instr, replacement, insnbuf);

		if (a->instrlen > a->replacementlen) {
			add_nops(insnbuf + a->replacementlen,
				 a->instrlen - a->replacementlen);
			insnbuf_sz += a->instrlen - a->replacementlen;
		}
		DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);

		text_poke_early(instr, insnbuf, insnbuf_sz);
	}
}
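
/*
 * Usage sketch (loosely modeled on the SMAP clac() patching in
 * <asm/smap.h>): the alternative() macro from <asm/alternative.h>
 * places the original bytes in .text and an alt_instr entry in
 * .altinstructions, which this function consumes at boot:
 *
 *	alternative("", "clac", X86_FEATURE_SMAP);
 *
 * On CPUs with SMAP, the (empty, nop-padded) original is replaced by
 * the 3-byte CLAC; otherwise it is left as optimized nops.
 */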
#ifdef CONFIG_SMP
static void alternatives_smp_lock(const s32 *start, const s32 *end,
				  u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (*ptr == 0x3e)
			text_poke(ptr, ((unsigned char []){0xf0}), 1);
	}
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
				    u8 *text, u8 *text_end)
{
	const s32 *poff;

	for (poff = start; poff < end; poff++) {
		u8 *ptr = (u8 *)poff + *poff;

		if (!*poff || ptr < text || ptr >= text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (*ptr == 0xf0)
			text_poke(ptr, ((unsigned char []){0x3E}), 1);
	}
}
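
/*
 * The entries walked above come from the LOCK_PREFIX macro in
 * <asm/alternative.h>: every locked instruction is emitted as
 * "lock; ..." together with a 4-byte relative offset to that prefix
 * byte recorded in the .smp_locks section, which is what __smp_locks
 * and each module's locks list point at.
 */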
struct smp_alt_module {
	/* owning module (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	const s32	*locks;
	const s32	*locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static bool uniproc_patched = false;	/* protected by text_mutex */
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	mutex_lock(&text_mutex);
	if (!uniproc_patched)
		goto unlock;

	if (num_possible_cpus() == 1)
		/* Don't bother remembering, we'll never have to undo it. */
		goto smp_unlock;

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (!smp)
		/* we'll run the (safe but slow) SMP code then ... */
		goto unlock;

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("locks %p -> %p, text %p -> %p, name %s",
		smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	list_add_tail(&smp->next, &smp_alt_modules);
smp_unlock:
	alternatives_smp_unlock(locks, locks_end, text, text_end);
unlock:
	mutex_unlock(&text_mutex);
}
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	mutex_lock(&text_mutex);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		kfree(item);
		break;
	}
	mutex_unlock(&text_mutex);
}
void alternatives_enable_smp(void)
{
	struct smp_alt_module *mod;

	/* Why bother if there are no other CPUs? */
	BUG_ON(num_possible_cpus() == 1);

	mutex_lock(&text_mutex);

	if (uniproc_patched) {
		pr_info("switching to SMP code\n");
		BUG_ON(num_online_cpus() != 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
		uniproc_patched = false;
	}
	mutex_unlock(&text_mutex);
}
/*
 * Return 1 if the address range is reserved for SMP-alternatives.
 * Must hold text_mutex.
 */
int alternatives_text_reserved(void *start, void *end)
{
	struct smp_alt_module *mod;
	const s32 *poff;
	u8 *text_start = start;
	u8 *text_end = end;

	lockdep_assert_held(&text_mutex);

	list_for_each_entry(mod, &smp_alt_modules, next) {
		if (mod->text > text_end || mod->text_end < text_start)
			continue;
		for (poff = mod->locks; poff < mod->locks_end; poff++) {
			const u8 *ptr = (const u8 *)poff + *poff;

			if (text_start <= ptr && text_end > ptr)
				return 1;
		}
	}

	return 0;
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_ops.init.patch(p->instrtype, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
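
/*
 * The patch callback (pv_ops.init.patch; native_patch() unless a
 * hypervisor installs its own) rewrites the paravirt call in insnbuf
 * in place and returns the number of bytes it produced; whatever is
 * left of p->len is padded with nops above.
 */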
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

#ifdef CONFIG_SMP
	/* Patch to UP if other cpus not imminent. */
	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {
		uniproc_patched = true;
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
	}

	if (!uniproc_patched || num_possible_cpus() == 1)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
#endif

	apply_paravirt(__parainstructions, __parainstructions_end);

	restart_nmi();
	alternatives_patched = 1;
}
/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
				       size_t len)
{
	unsigned long flags;

	local_irq_save(flags);
	memcpy(addr, opcode, len);
	local_irq_restore(flags);
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}
/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 */
void *text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	/*
	 * While the boot memory allocator is running we cannot use struct
	 * pages as they are not yet initialized.
	 */
	BUG_ON(!after_bootmem);

	lockdep_assert_held(&text_mutex);

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}
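
/*
 * Design note: mapping the target page(s) through the FIX_TEXT_POKE*
 * fixmap slots and writing through that alias lets the kernel keep its
 * normal text mapping read-only; only the temporary alias is writable,
 * and only with interrupts off on this CPU.
 */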
static void do_sync_core(void *info)
{
	sync_core();
}

static bool bp_patching_in_progress;
static void *bp_int3_handler, *bp_int3_addr;

int poke_int3_handler(struct pt_regs *regs)
{
	/*
	 * Having observed our INT3 instruction, we now must observe
	 * bp_patching_in_progress.
	 *
	 *	in_progress = TRUE		INT3
	 *	WMB				RMB
	 *	write INT3			if (in_progress)
	 *
	 * Idem for bp_int3_handler.
	 */
	smp_rmb();

	if (likely(!bp_patching_in_progress))
		return 0;

	if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
		return 0;

	/* set up the specified breakpoint handler */
	regs->ip = (unsigned long) bp_int3_handler;

	return 1;
}
NOKPROBE_SYMBOL(poke_int3_handler);
/**
 * text_poke_bp() -- update instructions on live kernel on SMP
 * @addr:	address to patch
 * @opcode:	opcode of new instruction
 * @len:	length to copy
 * @handler:	address to jump to when the temporary breakpoint is hit
 *
 * Modify multi-byte instruction by using int3 breakpoint on SMP.
 * We completely avoid stop_machine() here, and achieve the
 * synchronization using int3 breakpoint.
 *
 * The way it is done:
 *	- add a int3 trap to the address that will be patched
 *	- sync cores
 *	- update all but the first byte of the patched range
 *	- sync cores
 *	- replace the first byte (int3) by the first byte of
 *	  replacing opcode
 *	- sync cores
 */
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
	unsigned char int3 = 0xcc;

	bp_int3_handler = handler;
	bp_int3_addr = (u8 *)addr + sizeof(int3);
	bp_patching_in_progress = true;

	lockdep_assert_held(&text_mutex);

	/*
	 * Corresponding read barrier in int3 notifier for making sure the
	 * in_progress and handler are correctly ordered wrt. patching.
	 */
	smp_wmb();

	text_poke(addr, &int3, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);

	if (len - sizeof(int3) > 0) {
		/* patch all but the first byte */
		text_poke((char *)addr + sizeof(int3),
			  (const char *) opcode + sizeof(int3),
			  len - sizeof(int3));
		/*
		 * According to Intel, this core syncing is very likely
		 * not necessary and we'd be safe even without it. But
		 * better safe than sorry (plus there's not only Intel).
		 */
		on_each_cpu(do_sync_core, NULL, 1);
	}

	/* patch the first byte */
	text_poke(addr, opcode, sizeof(int3));

	on_each_cpu(do_sync_core, NULL, 1);
	/*
	 * sync_core() implies an smp_mb() and orders this store against
	 * the writing of the new instruction.
	 */
	bp_patching_in_progress = false;

	return addr;
}
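
/*
 * Byte-level example for a 5-byte site (values are illustrative): to
 * patch in "e8 d1 00 00 00" (a CALL), the first byte becomes "cc"
 * (INT3), cores are synced, bytes 2-5 of the new instruction are
 * written, cores are synced again, and finally the INT3 is replaced by
 * the new first byte. Any CPU that executes the site mid-patch traps
 * into poke_int3_handler() and is redirected to @handler instead of
 * running a half-written instruction.
 */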