/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <linux/stop_machine.h>
#define __ALT_PTR(a, f)		((u32 *)((void *)&(a)->f + (a)->f))
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
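/*
 * Each alt_instr record stores orig_offset and alt_offset relative to
 * the offset field's own address, so __ALT_PTR() resolves a pointer by
 * adding the field's value to the field's address. Self-relative
 * offsets keep the records position-independent.
 */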
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};
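/*
 * An alt_region describes a [begin, end) slice of alt_instr records:
 * the whole kernel table for apply_alternatives_all(), or a
 * caller-supplied range for apply_alternatives() below.
 */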
/*
 * Check if the target PC is within an alternative block.
 */
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr;

	/* Branches back into the core kernel text must be retargeted. */
	if (kernel_text_address(pc))
		return 1;

	/* Branches within this alternative's own replacement are fine. */
	replptr = (unsigned long)ALT_REPL_PTR(alt);
	if (pc >= replptr && pc <= (replptr + alt->alt_len))
		return 0;

	/*
	 * Branching into *another* alternate sequence is doomed, and
	 * we're not even trying to fix it up.
	 */
	BUG();
}
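/*
 * Example of the fix-up decided above: a replacement "bl panic" encodes
 * an offset relative to its slot in the alternatives section; once the
 * instruction is copied over the original code, reaching the same
 * target needs offset = target - insnptr, which get_alt_insn() below
 * recomputes. A "b 1f" that stays within the replacement sequence
 * keeps its encoding unchanged.
 */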
static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	}

	return insn;
}
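/*
 * Note: AArch64 instructions are always stored little-endian, even on
 * a big-endian kernel, hence the explicit le32_to_cpu()/cpu_to_le32()
 * conversions around every instruction word read or written here.
 */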
static void __apply_alternatives(void *alt_region)
{
	struct alt_instr *alt;
	struct alt_region *region = alt_region;
	u32 *origptr, *replptr;

	for (alt = region->begin; alt < region->end; alt++) {
		u32 insn;
		int i, nr_inst;

		if (!cpus_have_cap(alt->cpufeature))
			continue;

		BUG_ON(alt->alt_len != alt->orig_len);

		pr_info_once("patching kernel code\n");

		origptr = ALT_ORIG_PTR(alt);
		replptr = ALT_REPL_PTR(alt);
		nr_inst = alt->alt_len / sizeof(insn);

		for (i = 0; i < nr_inst; i++) {
			insn = get_alt_insn(alt, origptr + i, replptr + i);
			*(origptr + i) = cpu_to_le32(insn);
		}

		flush_icache_range((uintptr_t)origptr,
				   (uintptr_t)(origptr + nr_inst));
	}
}
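/*
 * For reference, a record consumed by the loop above is typically
 * emitted via the ALTERNATIVE() macro from <asm/alternative.h>, along
 * the lines of:
 *
 *	asm(ALTERNATIVE("nop", "dmb ish", ARM64_SOME_FEATURE));
 *
 * which keeps the "nop" in .text, places the "dmb ish" in
 * .altinstr_replacement, and adds a descriptor to .altinstructions.
 * (ARM64_SOME_FEATURE is a placeholder capability name, and the exact
 * macro shape varies by kernel version.)
 */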
/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
	static int patched = 0;
	struct alt_region region = {
		.begin	= __alt_instructions,
		.end	= __alt_instructions_end,
	};

	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
		while (!READ_ONCE(patched))
			cpu_relax();
		isb();
	} else {
		BUG_ON(patched);
		__apply_alternatives(&region);
		/* Barriers provided by the cache flushing */
		WRITE_ONCE(patched, 1);
	}

	return 0;
}
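/*
 * In the protocol above, only CPU 0 patches; the secondaries spin on
 * "patched", which is written only after flush_icache_range() has made
 * the new instructions visible, and isb() then resynchronizes each
 * secondary's pipeline before it can execute patched code.
 */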
void __init apply_alternatives_all(void)
{
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
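/*
 * Patch a single caller-supplied region, e.g. (presumably) a module's
 * .altinstructions section at load time; no stop_machine() is needed
 * here, as the code being patched is not yet being executed.
 */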
void apply_alternatives(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};

	__apply_alternatives(&region);
}
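/*
 * Once every region has been patched, the alt_instr descriptor table
 * between __alt_instructions and __alt_instructions_end is dead
 * weight; late init code (presumably free_initmem() or similar) can
 * call this to hand those pages back to the page allocator.
 */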
void free_alternatives_memory(void)
{
	free_reserved_area(__alt_instructions, __alt_instructions_end,
			   -1, "alternatives");
}