// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/text-patching.h>
#include <asm/inst.h>

static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
{
	if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
		/* For big endian correctness: plain address would use the wrong half */
		u32 val32 = val;

		__put_kernel_nofault(patch_addr, &val32, u32, failed);
	} else {
		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	mb();	/* sync */
	return -EPERM;
}

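/*
 * Note on the big-endian path above: 'val' is an unsigned long (8 bytes on
 * ppc64), so a 4-byte __put_kernel_nofault() through '&val' would pick up
 * its most-significant half on big endian (zero rather than the instruction
 * word). Copying into the local u32 first guarantees the low 32 bits are
 * what gets stored at 'patch_addr'.
 */
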
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	if (ppc_inst_prefixed(instr))
		return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
	else
		return __patch_mem(addr, ppc_inst_val(instr), addr, false);
}

struct patch_context {
	union {
		struct vm_struct *area;
		struct mm_struct *mm;
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	WARN_ON(!mm_is_thread_local(temp_mm));

	suspend_breakpoints();
	return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}

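/*
 * Minimal usage sketch (mirroring the callers further down, not a separate
 * API): the pair must run with IRQs disabled, and the caller supplies the
 * hwsync and CSI described above, e.g.:
 *
 *	asm volatile("ptesync" ::: "memory");	// hwsync: publish the temp PTE
 *	isync();				// CSI before the context switch
 *	orig_mm = start_using_temp_mm(patching_mm);
 *	... store through the temporary mapping ...
 *	stop_using_temp_mm(patching_mm, orig_mm);
 */
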
static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;
	unsigned long addr;
	int err;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
			  cpu);
		return -1;
	}

	// Map/unmap the area to ensure all page tables are pre-allocated
	addr = (unsigned long)area->addr;
	err = map_patch_area(empty_zero_page, addr);
	if (err)
		return err;

	unmap_patch_area(addr);

	this_cpu_write(cpu_patching_context.area, area);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(cpu_patching_context.area));
	this_cpu_write(cpu_patching_context.area, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);
	return 0;
}

static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
	mmput(mm);
}

static int text_area_cpu_up_mm(unsigned int cpu)
{
	struct mm_struct *mm;
	unsigned long addr;
	pte_t *pte;
	spinlock_t *ptl;

	mm = mm_alloc();
	if (WARN_ON(!mm))
		goto fail_no_mm;

	/*
	 * Choose a random page-aligned address from the interval
	 * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
	 * The lower address bound is PAGE_SIZE to avoid the zero-page.
	 */
	addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;

	/*
	 * PTE allocation uses GFP_KERNEL which means we need to
	 * pre-allocate the PTE here because we cannot do the
	 * allocation during patching when IRQs are disabled.
	 *
	 * Using get_locked_pte() to avoid open coding, the lock
	 * is unnecessary.
	 */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto fail_no_pte;
	pte_unmap_unlock(pte, ptl);

	this_cpu_write(cpu_patching_context.mm, mm);
	this_cpu_write(cpu_patching_context.addr, addr);

	return 0;

fail_no_pte:
	put_patching_mm(mm, addr);
fail_no_mm:
	return -ENOMEM;
}

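/*
 * A quick check of the address arithmetic above: get_random_long() %
 * (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2) gives a page index in
 * [0 .. DEFAULT_MAP_WINDOW/PAGE_SIZE - 3]; adding 1 and shifting left by
 * PAGE_SHIFT therefore yields a page-aligned address at or above PAGE_SIZE
 * and below DEFAULT_MAP_WINDOW - PAGE_SIZE, inside the documented interval.
 */
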
static int text_area_cpu_down_mm(unsigned int cpu)
{
	put_patching_mm(this_cpu_read(cpu_patching_context.mm),
			this_cpu_read(cpu_patching_context.addr));

	this_cpu_write(cpu_patching_context.mm, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);

	return 0;
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);

void __init poking_init(void)
{
	int ret;

	if (mm_patch_enabled())
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke_mm:online",
					text_area_cpu_up_mm,
					text_area_cpu_down_mm);
	else
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke:online",
					text_area_cpu_up,
					text_area_cpu_down);

	/* cpuhp_setup_state returns >= 0 on success */
	if (WARN_ON(ret < 0))
		return;

	static_branch_enable(&poking_init_done);
}

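/*
 * poking_init() is expected to run once during MM init, before the first
 * CONFIG_STRICT_KERNEL_RWX-protected patch; until the static key above is
 * enabled, patch_mem() below falls back to plain in-place patching.
 */
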
static unsigned long get_patch_pfn(void *addr)
{
	if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr))
		return vmalloc_to_pfn(addr);
	else
		return __pa_symbol(addr) >> PAGE_SHIFT;
}

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn = get_patch_pfn(addr);

	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}

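/*
 * For illustration only ('kernel_text_addr' and 'module_text_addr' are
 * placeholder names): the poke area can alias either kind of text because
 * get_patch_pfn() picks the right translation, e.g.
 *
 *	map_patch_area(kernel_text_addr, text_poke_addr);  // linear-mapped text: __pa_symbol()
 *	map_patch_area(module_text_addr, text_poke_addr);  // vmalloc/module text: vmalloc_to_pfn()
 */
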
static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * In hash, pte_clear flushes the tlb; in radix, we have to flush it
	 * explicitly with flush_tlb_kernel_range().
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}

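/*
 * As the comment above says, pte_clear() alone is enough to drop the
 * translation on hash MMU, while radix invalidates nothing until
 * flush_tlb_kernel_range() runs; doing both unconditionally keeps the
 * teardown correct on either MMU.
 */
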
static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;
	spinlock_t *ptl;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
	if (!pte)
		return -ENOMEM;

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync": : :"memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_mem(addr, val, patch_addr, is_dword);

	/* context synchronisation performed by __patch_mem() (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	pte_unmap_unlock(pte, ptl);

	return err;
}

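/*
 * Ordering recap for __do_patch_mem_mm(), in sequence: ptesync publishes the
 * temporary PTE and doubles as the hwsync, isync is the CSI before the mm
 * switch, the store plus its dcbst/icbi/isync in __patch_mem() provides the
 * CSI afterwards, and finally pte_clear() + ptesync + the local tlbiel in
 * local_flush_tlb_page_psize() tear the mapping down again.
 */
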
static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	err = __patch_mem(addr, val, patch_addr, is_dword);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

static int patch_mem(void *addr, unsigned long val, bool is_dword)
{
	int err;
	unsigned long flags;

	/*
	 * During early boot patch_instruction() is called before the text
	 * poke area is ready, but we still need to allow patching, so just
	 * do the plain in-place patching in that case.
	 */
	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
	    !static_branch_likely(&poking_init_done))
		return __patch_mem(addr, val, addr, is_dword);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_mem_mm(addr, val, is_dword);
	else
		err = __do_patch_mem(addr, val, is_dword);
	local_irq_restore(flags);

	return err;
}

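/*
 * So patch_mem() has three paths: a plain in-place write when
 * CONFIG_STRICT_KERNEL_RWX is off or poking_init() has not run yet, the
 * temporary-mm path when SMP + radix, and otherwise the per-cpu text poke
 * area mapped into init_mm; the latter two always run with IRQs disabled.
 */
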
#ifdef CONFIG_PPC64

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	if (ppc_inst_prefixed(instr))
		return patch_mem(addr, ppc_inst_as_ulong(instr), true);
	else
		return patch_mem(addr, ppc_inst_val(instr), false);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_uint(void *addr, unsigned int val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
		return -EINVAL;

	return patch_mem(addr, val, false);
}
NOKPROBE_SYMBOL(patch_uint);

int patch_ulong(void *addr, unsigned long val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
		return -EINVAL;

	return patch_mem(addr, val, true);
}
NOKPROBE_SYMBOL(patch_ulong);

#else

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return patch_mem(addr, ppc_inst_val(instr), false);
}
NOKPROBE_SYMBOL(patch_instruction);

#endif /* CONFIG_PPC64 */

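/*
 * Example (for illustration only; 'site' and 'some_aligned_u32' are
 * hypothetical names):
 *
 *	u32 *site;			// instruction to rewrite
 *	ppc_inst_t nop = ppc_inst(PPC_RAW_NOP());
 *
 *	patch_instruction(site, nop);		// replace with a nop
 *	patch_uint(&some_aligned_u32, 0);	// patch a 4-byte datum
 *
 * Callers must pass naturally aligned addresses; patch_uint() and
 * patch_ulong() reject misaligned ones with -EINVAL.
 */
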
static int patch_memset64(u64 *addr, u64 val, size_t count)
{
	for (u64 *end = addr + count; addr < end; addr++)
		__put_kernel_nofault(addr, &val, u64, failed);

	return 0;

failed:
	return -EPERM;
}

static int patch_memset32(u32 *addr, u32 val, size_t count)
{
	for (u32 *end = addr + count; addr < end; addr++)
		__put_kernel_nofault(addr, &val, u32, failed);

	return 0;

failed:
	return -EPERM;
}

static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
{
	unsigned long start = (unsigned long)patch_addr;
	int err;

	/* Repeat instruction */
	if (repeat_instr) {
		ppc_inst_t instr = ppc_inst_read(code);

		if (ppc_inst_prefixed(instr)) {
			u64 val = ppc_inst_as_ulong(instr);

			err = patch_memset64((u64 *)patch_addr, val, len / 8);
		} else {
			u32 val = ppc_inst_val(instr);

			err = patch_memset32(patch_addr, val, len / 4);
		}
	} else {
		err = copy_to_kernel_nofault(patch_addr, code, len);
	}

	smp_wmb();	/* smp write barrier */
	flush_icache_range(start, start + len);
	return err;
}

/*
 * A page is mapped and instructions that fit the page are patched.
 * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
 */
static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	struct mm_struct *patching_mm, *orig_mm;
	unsigned long pfn = get_patch_pfn(addr);
	unsigned long text_poke_addr;
	spinlock_t *ptl;
	u32 *patch_addr;
	pte_t *pte;
	int err;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
	if (!pte)
		return -ENOMEM;

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync" ::: "memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_instructions(patch_addr, code, len, repeat_instr);

	/* context synchronisation performed by __patch_instructions */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	pte_unmap_unlock(pte, ptl);

	return err;
}

/*
 * A page is mapped and instructions that fit the page are patched.
 * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
 */
static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	unsigned long pfn = get_patch_pfn(addr);
	unsigned long text_poke_addr;
	u32 *patch_addr;
	pte_t *pte;
	int err;

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync" ::: "memory");

	err = __patch_instructions(patch_addr, code, len, repeat_instr);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

/*
 * Patch 'addr' with 'len' bytes of instructions from 'code'.
 *
 * If repeat_instr is true, the same instruction is filled for
 * 'len' bytes.
 */
int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	while (len > 0) {
		unsigned long flags;
		size_t plen;
		int err;

		plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);

		local_irq_save(flags);
		if (mm_patch_enabled())
			err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
		else
			err = __do_patch_instructions(addr, code, plen, repeat_instr);
		local_irq_restore(flags);
		if (err)
			return err;

		len -= plen;
		addr = (u32 *)((unsigned long)addr + plen);
		if (!repeat_instr)
			code = (u32 *)((unsigned long)code + plen);
	}

	return 0;
}
NOKPROBE_SYMBOL(patch_instructions);

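/*
 * Example (for illustration only; 'dst', 'buf' and 'buf_len' are
 * hypothetical):
 *
 *	u32 nop = PPC_RAW_NOP();
 *
 *	patch_instructions(dst, &nop, 64, true);	// fill 64 bytes with nops
 *	patch_instructions(dst, buf, buf_len, false);	// copy generated code
 *
 * 'len' is in bytes and is split internally on page boundaries; with
 * repeat_instr the source pointer is not advanced between pages.
 */
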
int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}

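/*
 * Example (for illustration only; 'site' and 'target_func' are hypothetical):
 *
 *	patch_branch(site, (unsigned long)target_func, BRANCH_SET_LINK);
 *
 * emits a relative 'bl target_func' at 'site', provided the target is within
 * the +/- 32 MB reach of an I-form branch; otherwise create_branch() fails
 * and -ERANGE is returned.
 */
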
/*
 * Helper to check if a given instruction is a conditional branch
 * Derived from the conditional checks in analyse_instr()
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);

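/*
 * For instance, 0x4182fff0 (beq, primary opcode 16) is reported as
 * conditional, and so is 0x4e800420 (bctr, opcode 19 with extended opcode
 * 528) regardless of its BO field, matching the conservative check above,
 * while 0x48000010 (plain 'b', opcode 18) is not.
 */
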
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_cond_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

	return 0;
}

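/*
 * Worked example: with flags = 0x01820000 (BO=12, BI=2, i.e. "branch if
 * cr0.eq is set") and a relative offset of +0x100 bytes, the line above
 * produces 0x40000000 | 0x01820000 | 0x0100 = 0x41820100, the usual
 * encoding of 'beq +0x100'. The offset must stay within the +/- 32 KB
 * reach checked by is_offset_in_cond_branch_range().
 */
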
int instr_is_relative_branch(ppc_inst_t instr)
{
	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

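/*
 * Worked example for the sign extension above: the I-form instruction
 * 0x4bfffff8 ('b .-8') has an LI field of 0x3fffff8; since bit 0x2000000 is
 * set, subtracting 0x4000000 yields -8, which is then added to the
 * instruction's own address because the branch is relative.
 */
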
unsigned long branch_target(const u32 *instr)
{
	if (instr_is_branch_iform(ppc_inst_read(instr)))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(ppc_inst_read(instr)))
		return branch_bform_target(instr);

	return 0;
}

int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
	unsigned long target;

	target = branch_target(src);

	if (instr_is_branch_iform(ppc_inst_read(src)))
		return create_branch(instr, dest, target,
				     ppc_inst_val(ppc_inst_read(src)));
	else if (instr_is_branch_bform(ppc_inst_read(src)))
		return create_cond_branch(instr, dest, target,
					  ppc_inst_val(ppc_inst_read(src)));

	return 1;
}