// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <linux/stop_machine.h>
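
/*
 * Each struct alt_instr records its original and replacement sequences as
 * self-relative offsets, so the helpers below turn an offset field into an
 * absolute pointer by adding the address of the field itself.
 */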
#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
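
/*
 * The cpucap field is overloaded: ARM64_CB_BIT marks an entry whose
 * "replacement" is a patching callback rather than a literal instruction
 * sequence. ALT_CAP() masks the bit off to recover the capability number.
 */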
#define ALT_CAP(a)		((a)->cpucap & ~ARM64_CB_BIT)
#define ALT_HAS_CB(a)		((a)->cpucap & ARM64_CB_BIT)

/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;

static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

bool alternative_is_applied(u16 cpucap)
{
	if (WARN_ON(cpucap >= ARM64_NCAPS))
		return false;

	return test_bit(cpucap, applied_alternatives);
}

/*
 * Check if the target PC is within an alternative block.
 */
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}

#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
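
/*
 * Fetch one instruction from the replacement sequence and fix up any
 * PC-relative parts (immediate branches and adrp) so that it is still
 * correct when it runs from the original location instead.
 */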
static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC relative
		 * literal addresses
		 */
		BUG();
	}

	return insn;
}
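
/*
 * Default patching callback: copy the replacement sequence over the
 * original one, instruction by instruction, writing through updptr
 * (which may alias origptr via the kernel linear map).
 */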
static noinstr void patch_alternative(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}

/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static noinstr void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = arm64_ftr_reg_ctrel0.sys_val;
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_EL0_DminLine_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}
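
/*
 * Walk an .altinstructions region and patch every entry whose capability
 * is both requested in cpucap_mask and actually detected on this system.
 */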
static void __apply_alternatives(const struct alt_region *region,
				 bool is_module,
				 unsigned long *cpucap_mask)
{
	struct alt_instr *alt;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;
		int cap = ALT_CAP(alt);

		if (!test_bit(cap, cpucap_mask))
			continue;

		if (!cpus_have_cap(cap))
			continue;

		if (ALT_HAS_CB(alt))
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		origptr = ALT_ORIG_PTR(alt);
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

		if (ALT_HAS_CB(alt))
			alt_cb = ALT_REPL_PTR(alt);
		else
			alt_cb = patch_alternative;

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		icache_inval_all_pou();
		isb();

		bitmap_or(applied_alternatives, applied_alternatives,
			  cpucap_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   system_cpucaps, ARM64_NCAPS);
	}
}
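
/*
 * The vDSO is an ELF image embedded in the kernel; locate its own
 * .altinstructions section and patch it in place, offering every
 * capability and letting __apply_alternatives() filter as usual.
 */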
static void __init apply_alternatives_vdso(void)
{
	struct alt_region region;
	const struct elf64_hdr *hdr;
	const struct elf64_shdr *shdr;
	const struct elf64_shdr *alt;
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	hdr = (struct elf64_hdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;

	region = (struct alt_region){
		.begin	= (void *)hdr + alt->sh_offset,
		.end	= (void *)hdr + alt->sh_offset + alt->sh_size,
	};

	__apply_alternatives(&region, false, &all_capabilities[0]);
}
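
/* The core kernel's .altinstructions section, as laid out by the linker */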
static const struct alt_region kernel_alternatives __initconst = {
	.begin	= (struct alt_instr *)__alt_instructions,
	.end	= (struct alt_instr *)__alt_instructions_end,
};

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __init __apply_alternatives_multi_stop(void *unused)
{
	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!all_alternatives_applied)
			cpu_relax();
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);

		bitmap_complement(remaining_capabilities, boot_cpucaps,
				  ARM64_NCAPS);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&kernel_alternatives, false,
				     remaining_capabilities);
		/* Barriers provided by the cache flushing */
		all_alternatives_applied = 1;
	}

	return 0;
}
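
/*
 * Called once all CPUs are up: patch the vDSO, then apply whatever
 * system-wide alternatives the boot-CPU pass did not already handle,
 * with every CPU quiesced in stop_machine().
 */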
void __init apply_alternatives_all(void)
{
	pr_info("applying system-wide alternatives\n");

	apply_alternatives_vdso();
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	pr_info("applying boot alternatives\n");

	__apply_alternatives(&kernel_alternatives, false,
			     &boot_cpucaps[0]);
}
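
/*
 * Modules carry their own .altinstructions region; it is patched at module
 * load time, offering all capabilities and letting __apply_alternatives()
 * filter on what this system actually supports.
 */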
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif
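
/*
 * Common callback that simply NOPs out the original sequence; exported so
 * that alternatives in modules can use it too.
 */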
noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
			       __le32 *updptr, int nr_inst)
{
	for (int i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
EXPORT_SYMBOL(alt_cb_patch_nops);