// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/sections.h>

static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

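/*
 * Kernel and module text is mapped read-only, so it cannot be written in
 * place. patch_map() returns a temporary writable alias of the page
 * containing @addr through a text-poke fixmap slot; patch_unmap() removes
 * that alias again. Callers serialize against each other with patch_lock.
 */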
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_EXECMEM))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed 32 bit size and must always be
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

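/*
 * Update a 64-bit literal (rather than an instruction) embedded in otherwise
 * read-only text, using the same patch_lock/fixmap scheme as the instruction
 * writers above. noinstr: this may run while instrumentation is itself being
 * patched, so it must not be instrumented.
 */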
noinstr int aarch64_insn_write_literal_u64(void *addr, u64 val)
{
	u64 *waddr;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &val, sizeof(val));

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

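/*
 * __text_poke() applies a text_poke_f callback to @len bytes at @addr, one
 * page at a time: each chunk is mapped writable through FIX_TEXT_POKE0,
 * patched via @func, and unmapped again, all under patch_lock. The caches
 * for the whole range are flushed once at the end.
 */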
typedef void text_poke_f(void *dst, void *src, size_t patched, size_t len);

static void *__text_poke(text_poke_f func, void *addr, void *src, size_t len)
{
	unsigned long flags;
	size_t patched = 0;
	size_t size;
	void *waddr;
	void *ptr;

	raw_spin_lock_irqsave(&patch_lock, flags);

	while (patched < len) {
		ptr = addr + patched;
		size = min_t(size_t, PAGE_SIZE - offset_in_page(ptr),
			     len - patched);

		waddr = patch_map(ptr, FIX_TEXT_POKE0);
		func(waddr, src, patched, size);
		patch_unmap(FIX_TEXT_POKE0);

		patched += size;
	}
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return addr;
}

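/*
 * text_poke_f callbacks for __text_poke(): copy @len bytes from the source
 * buffer, or fill the destination with a repeated 32-bit value.
 */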
static void text_poke_memcpy(void *dst, void *src, size_t patched, size_t len)
{
	copy_to_kernel_nofault(dst, src + patched, len);
}

static void text_poke_memset(void *dst, void *src, size_t patched, size_t len)
{
	u32 c = *(u32 *)src;

	memset32(dst, c, len / 4);
}

/**
 * aarch64_insn_copy - Copy instructions into (an unused part of) RX memory
 * @dst: address to modify
 * @src: source of the copy
 * @len: length to copy
 *
 * Useful for JITs to dump new code blocks into unused regions of RX memory.
 */
noinstr void *aarch64_insn_copy(void *dst, void *src, size_t len)
{
	/* A64 instructions must be word aligned */
	if ((uintptr_t)dst & 0x3)
		return NULL;

	return __text_poke(text_poke_memcpy, dst, src, len);
}

/**
 * aarch64_insn_set - memset for RX memory regions.
 * @dst: address to modify
 * @insn: value to set
 * @len: length of memory region.
 *
 * Useful for JITs to fill regions of RX memory with illegal instructions.
 */
noinstr void *aarch64_insn_set(void *dst, u32 insn, size_t len)
{
	if ((uintptr_t)dst & 0x3)
		return NULL;

	return __text_poke(text_poke_memset, dst, &insn, len);
}

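/*
 * Patch a single instruction with no cross-CPU synchronization: the new
 * instruction is written and the caches are cleaned/invalidated to the
 * Point of Unification, but other CPUs are not stopped. Callers are
 * responsible for any synchronization needed when the target may be
 * executed concurrently (see aarch64_insn_patch_text() below for the
 * stop_machine()-based variant).
 */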
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		caches_clean_inval_pou((uintptr_t)tp,
				       (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

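/*
 * Work description passed to the stop_machine() callback: the instruction
 * addresses to patch, the new instruction words, and a counter used to
 * rendezvous the participating CPUs.
 */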
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

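/*
 * Patch @cnt instructions under stop_machine(): every online CPU enters
 * aarch64_insn_patch_text_cb(), the last one to arrive performs all the
 * writes while the rest spin, and the spinning CPUs are released with an
 * extra cpu_count increment once patching is done.
 */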
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}