/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
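/*
 * An "alternative" pairs an original instruction sequence in .text with
 * a replacement assembled into the .altinstr_replacement section, plus
 * a struct alt_instr entry tying both to a cpufeature. At boot, the
 * original is rewritten with the replacement on systems that have the
 * feature. Illustrative use, assuming the usual ALTERNATIVE() macro
 * from asm/alternative.h:
 *
 *	asm(ALTERNATIVE("mrs %0, tpidr_el1", "mrs %0, tpidr_el2",
 *			ARM64_HAS_VIRT_HOST_EXTN) : "=r" (off));
 */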
#define pr_fmt(fmt) "alternatives: " fmt
#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/sections.h>
#include <linux/stop_machine.h>
#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
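/*
 * The offsets stored in struct alt_instr are relative to the offset
 * fields themselves, so the entries need no relocation wherever the
 * image is loaded. __ALT_PTR() folds such a self-relative offset back
 * into an absolute pointer; ALT_ORIG_PTR(alt) is effectively:
 *
 *	(void *)&alt->orig_offset + alt->orig_offset
 */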
static int all_alternatives_applied;

static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);
struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};
bool alternative_is_applied(u16 cpufeature)
{
	if (WARN_ON(cpufeature >= ARM64_NCAPS))
		return false;

	return test_bit(cpufeature, applied_alternatives);
}
/*
 * Check if the target PC is within an alternative block.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr;

	if (kernel_text_address(pc))
		return true;

	replptr = (unsigned long)ALT_REPL_PTR(alt);
	if (pc >= replptr && pc <= (replptr + alt->alt_len))
		return false;

	/*
	 * Branching into *another* alternate sequence is doomed, and
	 * we're not even trying to fix it up.
	 */
	BUG();
}
#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
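/* e.g. align_down(0x12f45, SZ_4K) == 0x12000 */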
static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC relative
		 * literal addresses
		 */
		BUG();
	}

	return insn;
}
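/*
 * Worked adrp example (illustrative addresses): an adrp at replacement
 * address 0xffff800010a01004 whose target page is 0xffff800010c00000
 * carries offset 0x1ff000. Patched into an original slot at
 * 0xffff800010801008, the offset is rewritten to
 * 0xffff800010c00000 - 0xffff800010801000 = 0x3ff000, so the
 * instruction still resolves to the same 4K page from its new PC.
 */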
static void patch_alternative(struct alt_instr *alt,
			      __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}
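/*
 * patch_alternative() is the default patching method: copy nr_inst
 * instructions from the replacement sequence, fixing up PC-relative
 * branches and adrp instructions on the way through get_alt_insn().
 */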
/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

	ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_DMINLINE_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}
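/*
 * CTR_EL0.DminLine is the log2 of the number of 4-byte words in the
 * smallest D-cache line, hence d_size = 4 << DminLine bytes above;
 * e.g. DminLine == 4 gives 64-byte lines.
 */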
static void __apply_alternatives(void *alt_region, bool is_module,
				 unsigned long *feature_mask)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;

		if (!test_bit(alt->cpufeature, feature_mask))
			continue;

		/* Use ARM64_CB_PATCH as an unconditional patch */
		if (alt->cpufeature < ARM64_CB_PATCH &&
		    !cpus_have_cap(alt->cpufeature))
			continue;

		if (alt->cpufeature == ARM64_CB_PATCH)
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

		if (alt->cpufeature < ARM64_CB_PATCH)
			alt_cb = patch_alternative;
		else
			alt_cb = ALT_REPL_PTR(alt);

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		__flush_icache_all();
		isb();

		/* Ignore ARM64_CB bit from feature mask */
		bitmap_or(applied_alternatives, applied_alternatives,
			  feature_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   cpu_hwcaps, ARM64_NCAPS);
	}
}
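/*
 * For ARM64_CB_PATCH entries there is no static replacement; the
 * replacement pointer is instead a C callback of type alternative_cb_t,
 * invoked as alt_cb(alt, origptr, updptr, nr_inst) above, which lets
 * instructions be generated from values only known at boot. A callback
 * has the same shape as patch_alternative():
 *
 *	void my_cb(struct alt_instr *alt, __le32 *origptr,
 *		   __le32 *updptr, int nr_inst);
 *
 * (my_cb is a placeholder name for illustration.)
 */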
/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!READ_ONCE(all_alternatives_applied))
			cpu_relax();
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE);

		bitmap_complement(remaining_capabilities, boot_capabilities,
				  ARM64_NPATCHABLE);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&region, false, remaining_capabilities);
		/* Barriers provided by the cache flushing */
		WRITE_ONCE(all_alternatives_applied, 1);
	}

	return 0;
}
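/*
 * Secondary CPUs spin in the !READ_ONCE() loop above rather than in
 * stop_machine()'s own state machine, since that code may itself be
 * patched; the isb() they execute afterwards discards any stale
 * instructions already fetched.
 */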
void __init apply_alternatives_all(void)
{
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	struct alt_region region = {
		.begin	= (struct alt_instr *)__alt_instructions,
		.end	= (struct alt_instr *)__alt_instructions_end,
	};

	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	__apply_alternatives(&region, false, &boot_capabilities[0]);
}
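/*
 * Modules are patched once at load time, with every patchable
 * capability allowed in the mask; __apply_alternatives() still filters
 * on cpus_have_cap(), so only features the CPUs actually have take
 * effect.
 */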
#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE);

	bitmap_fill(all_capabilities, ARM64_NPATCHABLE);

	__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif