// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>
23 static void *patch_map(void *addr
, int fixmap
)
25 uintptr_t uintaddr
= (uintptr_t) addr
;
28 if (core_kernel_text(uintaddr
))
29 page
= phys_to_page(__pa_symbol(addr
));
30 else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX
))
31 page
= vmalloc_to_page(addr
);
37 return (void *)set_fixmap_offset(fixmap
, page_to_phys(page
) +
38 (uintaddr
& ~PAGE_MASK
));
40 NOKPROBE_SYMBOL(patch_map
);
42 static void patch_unmap(int fixmap
)
46 NOKPROBE_SYMBOL(patch_unmap
);
48 static int patch_insn_write(void *addr
, const void *insn
, size_t len
)
51 bool across_pages
= (((uintptr_t) addr
& ~PAGE_MASK
) + len
) > PAGE_SIZE
;
55 * Before reaching here, it was expected to lock the text_mutex
56 * already, so we don't need to give another lock here and could
57 * ensure that it was safe between each cores.
59 lockdep_assert_held(&text_mutex
);
62 patch_map(addr
+ len
, FIX_TEXT_POKE1
);
64 waddr
= patch_map(addr
, FIX_TEXT_POKE0
);
66 ret
= copy_to_kernel_nofault(waddr
, insn
, len
);
68 patch_unmap(FIX_TEXT_POKE0
);
71 patch_unmap(FIX_TEXT_POKE1
);
75 NOKPROBE_SYMBOL(patch_insn_write
);
77 static int patch_insn_write(void *addr
, const void *insn
, size_t len
)
79 return copy_to_kernel_nofault(addr
, insn
, len
);
81 NOKPROBE_SYMBOL(patch_insn_write
);
82 #endif /* CONFIG_MMU */
84 int patch_text_nosync(void *addr
, const void *insns
, size_t len
)
89 ret
= patch_insn_write(tp
, insns
, len
);
92 flush_icache_range((uintptr_t) tp
, (uintptr_t) tp
+ len
);
96 NOKPROBE_SYMBOL(patch_text_nosync
);
98 static int patch_text_cb(void *data
)
100 struct patch_insn
*patch
= data
;
103 if (atomic_inc_return(&patch
->cpu_count
) == 1) {
105 patch_text_nosync(patch
->addr
, &patch
->insn
,
106 GET_INSN_LENGTH(patch
->insn
));
107 atomic_inc(&patch
->cpu_count
);
109 while (atomic_read(&patch
->cpu_count
) <= num_online_cpus())
116 NOKPROBE_SYMBOL(patch_text_cb
);
118 int patch_text(void *addr
, u32 insn
)
120 struct patch_insn patch
= {
123 .cpu_count
= ATOMIC_INIT(0),
126 return stop_machine_cpuslocked(patch_text_cb
,
127 &patch
, cpu_online_mask
);
129 NOKPROBE_SYMBOL(patch_text
);