// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
#include <asm/sections.h>

struct patch_insn {
	void *addr;
	u32 *insns;
	size_t len;
	atomic_t cpu_count;
};

int riscv_patch_in_stop_machine = false;
#ifdef CONFIG_MMU

static inline bool is_kernel_exittext(uintptr_t addr)
{
	return system_state < SYSTEM_RUNNING &&
	       addr >= (uintptr_t)__exittext_begin &&
	       addr < (uintptr_t)__exittext_end;
}

/*
 * fix_to_virt(, idx) needs a const value (not a dynamic variable held in
 * a register), or BUILD_BUG_ON fails with "idx >= __end_of_fixed_addresses".
 * So use '__always_inline' and 'const unsigned int fixmap' here.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
	uintptr_t uintaddr = (uintptr_t)addr;
	struct page *page;

	if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 offset_in_page(addr));
}

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);
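
/*
 * Note (added commentary, not in the original file): for core kernel or
 * early __exittext addresses the backing page is found through the
 * kernel's symbol mapping (__pa_symbol()); for module text under
 * CONFIG_STRICT_MODULE_RWX it is found through the vmalloc area
 * (vmalloc_to_page()). Either way, the page is temporarily remapped
 * writable at a fixmap slot, so the permanent read-only mapping of the
 * text itself is never made writable.
 */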

static int __patch_insn_set(void *addr, u8 c, size_t len)
{
	bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
	void *waddr = addr;

	/*
	 * Only two pages can be mapped at a time for writing.
	 */
	if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
		return -EINVAL;

	/*
	 * Callers are expected to have taken text_mutex already, so no
	 * further locking is needed here to keep the write safe across
	 * cores.
	 */
	lockdep_assert_held(&text_mutex);

	preempt_disable();

	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	memset(waddr, c, len);

	/*
	 * We could have just patched a function that is about to be
	 * called, so make sure we don't execute partially patched
	 * instructions by flushing the icache as soon as possible.
	 */
	local_flush_icache_range((unsigned long)waddr,
				 (unsigned long)waddr + len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	preempt_enable();

	return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);
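
/*
 * Worked example (illustrative only, assuming 4 KiB pages): patching
 * 6 bytes at offset_in_page(addr) == 0xffc gives 0xffc + 6 = 0x1002,
 * which is > PAGE_SIZE, so across_pages is true: the head of the write
 * goes through the FIX_TEXT_POKE0 mapping and the tail lands in the
 * next page mapped at FIX_TEXT_POKE1. A request with
 * offset + len > 2 * PAGE_SIZE can never fit in the two slots and is
 * rejected with -EINVAL before any mapping is set up.
 */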

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
	bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
	void *waddr = addr;
	int ret;

	/*
	 * Only two pages can be mapped at a time for writing.
	 */
	if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
		return -EINVAL;

	/*
	 * Callers are expected to have taken text_mutex already, so no
	 * further locking is needed here to keep the write safe across
	 * cores.
	 *
	 * We're currently using stop_machine() for ftrace & kprobes, and while
	 * that ensures text_mutex is held before installing the mappings it
	 * does not ensure text_mutex is held by the calling thread. That's
	 * safe but triggers a lockdep failure, so just elide it for that
	 * specific case.
	 */
	if (!riscv_patch_in_stop_machine)
		lockdep_assert_held(&text_mutex);

	preempt_disable();

	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, insn, len);

	/*
	 * We could have just patched a function that is about to be
	 * called, so make sure we don't execute partially patched
	 * instructions by flushing the icache as soon as possible.
	 */
	local_flush_icache_range((unsigned long)waddr,
				 (unsigned long)waddr + len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	preempt_enable();

	return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
#else

static int __patch_insn_set(void *addr, u8 c, size_t len)
{
	memset(addr, c, len);

	return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
	return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(__patch_insn_write);

#endif /* CONFIG_MMU */

static int patch_insn_set(void *addr, u8 c, size_t len)
{
	size_t size;
	int ret;

	/*
	 * __patch_insn_set() can only work on 2 pages at a time so call it in a
	 * loop with len <= 2 * PAGE_SIZE.
	 */
	while (len) {
		size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
		ret = __patch_insn_set(addr, c, size);
		if (ret)
			return ret;

		addr += size;
		len -= size;
	}

	return 0;
}
NOKPROBE_SYMBOL(patch_insn_set);

int patch_text_set_nosync(void *addr, u8 c, size_t len)
{
	int ret;

	ret = patch_insn_set(addr, c, len);
	if (!ret)
		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
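
/*
 * Example usage (a hypothetical sketch, not taken from any in-tree
 * caller): filling a region of generated code with zero bytes, which
 * decode as illegal instructions on RISC-V. The caller must hold
 * text_mutex to satisfy the lockdep assertion in __patch_insn_set();
 * "dst" and "size" are assumed placeholders:
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text_set_nosync(dst, 0, size);
 *	mutex_unlock(&text_mutex);
 */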

int patch_insn_write(void *addr, const void *insn, size_t len)
{
	size_t size;
	int ret;

	/*
	 * Copy the instructions to the destination address, two pages at a time
	 * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
	 */
	while (len) {
		size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
		ret = __patch_insn_write(addr, insn, size);
		if (ret)
			return ret;

		addr += size;
		insn += size;
		len -= size;
	}

	return 0;
}
NOKPROBE_SYMBOL(patch_insn_write);

int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	int ret;

	ret = patch_insn_write(addr, insns, len);
	if (!ret)
		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
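
/*
 * Example usage (a hypothetical sketch): rewriting one 32-bit
 * instruction with the canonical RISC-V NOP (addi x0, x0, 0). The
 * "_nosync" variants flush the icache but do not serialize against
 * other harts with stop_machine(), so they are presumably only safe
 * when the patched range cannot be executing concurrently; "target"
 * is an assumed placeholder:
 *
 *	u32 nop = 0x00000013;
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text_nosync(target, &nop, sizeof(nop));
 *	mutex_unlock(&text_mutex);
 */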

static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		ret = patch_insn_write(patch->addr, patch->insns, patch->len);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();

		local_flush_icache_all();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);
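
/*
 * Trace of the rendezvous above (illustrative, 4 online CPUs): the
 * increments return 1, 2, 3, 4; the CPU that saw 4 == num_online_cpus()
 * performs the write, then bumps the counter to 5 with release
 * semantics. The other three CPUs spin while the count is <= 4, observe
 * 5, and each runs local_flush_icache_all(), so no hart keeps stale
 * instructions in its icache. The patching CPU itself already flushed
 * in __patch_insn_write().
 */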

int patch_text(void *addr, u32 *insns, size_t len)
{
	int ret;
	struct patch_insn patch = {
		.addr = addr,
		.insns = insns,
		.len = len,
		.cpu_count = ATOMIC_INIT(0),
	};

	/*
	 * kprobes takes text_mutex before calling patch_text(), but as we then
	 * call stop_machine(), the lockdep assertion in patch_insn_write()
	 * gets confused by the context in which the lock is taken.
	 * Instead, ensure the lock is held before calling stop_machine(), and
	 * set riscv_patch_in_stop_machine to skip the check in
	 * patch_insn_write().
	 */
	lockdep_assert_held(&text_mutex);
	riscv_patch_in_stop_machine = true;
	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
	riscv_patch_in_stop_machine = false;

	return ret;
}
NOKPROBE_SYMBOL(patch_text);
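
/*
 * Example usage (a hypothetical sketch modeled on what kprobes does):
 * installing an ebreak (0x00100073) over an instruction that may be
 * executing concurrently, which is why the write happens inside
 * stop_machine() with every other CPU parked. stop_machine_cpuslocked()
 * requires the CPU hotplug read lock, and patch_text() asserts
 * text_mutex; "probe_addr" is an assumed placeholder:
 *
 *	u32 ebreak = 0x00100073;
 *
 *	cpus_read_lock();
 *	mutex_lock(&text_mutex);
 *	ret = patch_text(probe_addr, &ebreak, sizeof(ebreak));
 *	mutex_unlock(&text_mutex);
 *	cpus_read_unlock();
 */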