// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 version
 *
 * Copyright (C) 2021 Sifive.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>
#include <asm/text-patching.h>
23 struct cpu_manufacturer_info_t
{
24 unsigned long vendor_id
;
25 unsigned long arch_id
;
27 void (*patch_func
)(struct alt_entry
*begin
, struct alt_entry
*end
,
28 unsigned long archid
, unsigned long impid
,
32 static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t
*cpu_mfr_info
)
34 #ifdef CONFIG_RISCV_M_MODE
35 cpu_mfr_info
->vendor_id
= csr_read(CSR_MVENDORID
);
36 cpu_mfr_info
->arch_id
= csr_read(CSR_MARCHID
);
37 cpu_mfr_info
->imp_id
= csr_read(CSR_MIMPID
);
39 cpu_mfr_info
->vendor_id
= sbi_get_mvendorid();
40 cpu_mfr_info
->arch_id
= sbi_get_marchid();
41 cpu_mfr_info
->imp_id
= sbi_get_mimpid();
44 switch (cpu_mfr_info
->vendor_id
) {
45 #ifdef CONFIG_ERRATA_ANDES
47 cpu_mfr_info
->patch_func
= andes_errata_patch_func
;
50 #ifdef CONFIG_ERRATA_SIFIVE
51 case SIFIVE_VENDOR_ID
:
52 cpu_mfr_info
->patch_func
= sifive_errata_patch_func
;
55 #ifdef CONFIG_ERRATA_THEAD
57 cpu_mfr_info
->patch_func
= thead_errata_patch_func
;
61 cpu_mfr_info
->patch_func
= NULL
;
65 static u32
riscv_instruction_at(void *p
)
69 return (u32
)parcel
[0] | (u32
)parcel
[1] << 16;
72 static void riscv_alternative_fix_auipc_jalr(void *ptr
, u32 auipc_insn
,
73 u32 jalr_insn
, int patch_offset
)
75 u32 call
[2] = { auipc_insn
, jalr_insn
};
78 /* get and adjust new target address */
79 imm
= riscv_insn_extract_utype_itype_imm(auipc_insn
, jalr_insn
);
82 /* update instructions */
83 riscv_insn_insert_utype_itype_imm(&call
[0], &call
[1], imm
);
85 /* patch the call place again */
86 patch_text_nosync(ptr
, call
, sizeof(u32
) * 2);
89 static void riscv_alternative_fix_jal(void *ptr
, u32 jal_insn
, int patch_offset
)
93 /* get and adjust new target address */
94 imm
= riscv_insn_extract_jtype_imm(jal_insn
);
97 /* update instruction */
98 riscv_insn_insert_jtype_imm(&jal_insn
, imm
);
100 /* patch the call place again */
101 patch_text_nosync(ptr
, &jal_insn
, sizeof(u32
));
104 void riscv_alternative_fix_offsets(void *alt_ptr
, unsigned int len
,
107 int num_insn
= len
/ sizeof(u32
);
110 for (i
= 0; i
< num_insn
; i
++) {
111 u32 insn
= riscv_instruction_at(alt_ptr
+ i
* sizeof(u32
));
114 * May be the start of an auipc + jalr pair
115 * Needs to check that at least one more instruction
118 if (riscv_insn_is_auipc(insn
) && i
< num_insn
- 1) {
119 u32 insn2
= riscv_instruction_at(alt_ptr
+ (i
+ 1) * sizeof(u32
));
121 if (!riscv_insn_is_jalr(insn2
))
124 /* if instruction pair is a call, it will use the ra register */
125 if (RV_EXTRACT_RD_REG(insn
) != 1)
128 riscv_alternative_fix_auipc_jalr(alt_ptr
+ i
* sizeof(u32
),
129 insn
, insn2
, patch_offset
);
133 if (riscv_insn_is_jal(insn
)) {
134 s32 imm
= riscv_insn_extract_jtype_imm(insn
);
136 /* Don't modify jumps inside the alternative block */
137 if ((alt_ptr
+ i
* sizeof(u32
) + imm
) >= alt_ptr
&&
138 (alt_ptr
+ i
* sizeof(u32
) + imm
) < (alt_ptr
+ len
))
141 riscv_alternative_fix_jal(alt_ptr
+ i
* sizeof(u32
),
148 * This is called very early in the boot process (directly after we run
149 * a feature detect on the boot CPU). No need to worry about other CPUs
152 static void __init_or_module
_apply_alternatives(struct alt_entry
*begin
,
153 struct alt_entry
*end
,
156 struct cpu_manufacturer_info_t cpu_mfr_info
;
158 riscv_fill_cpu_mfr_info(&cpu_mfr_info
);
160 riscv_cpufeature_patch_func(begin
, end
, stage
);
162 if (!cpu_mfr_info
.patch_func
)
165 cpu_mfr_info
.patch_func(begin
, end
,
166 cpu_mfr_info
.arch_id
,
172 static void __init
apply_vdso_alternatives(void)
175 const Elf_Shdr
*shdr
;
177 struct alt_entry
*begin
, *end
;
179 hdr
= (Elf_Ehdr
*)vdso_start
;
180 shdr
= (void *)hdr
+ hdr
->e_shoff
;
181 alt
= find_section(hdr
, shdr
, ".alternative");
185 begin
= (void *)hdr
+ alt
->sh_offset
,
186 end
= (void *)hdr
+ alt
->sh_offset
+ alt
->sh_size
,
188 _apply_alternatives((struct alt_entry
*)begin
,
189 (struct alt_entry
*)end
,
190 RISCV_ALTERNATIVES_BOOT
);
193 static void __init
apply_vdso_alternatives(void) { }
196 void __init
apply_boot_alternatives(void)
198 /* If called on non-boot cpu things could go wrong */
199 WARN_ON(smp_processor_id() != 0);
201 _apply_alternatives((struct alt_entry
*)__alt_start
,
202 (struct alt_entry
*)__alt_end
,
203 RISCV_ALTERNATIVES_BOOT
);
205 apply_vdso_alternatives();
209 * apply_early_boot_alternatives() is called from setup_vm() with MMU-off.
211 * Following requirements should be honoured for it to work correctly:
212 * 1) It should use PC-relative addressing for accessing kernel symbols.
213 * To achieve this we always use GCC cmodel=medany.
214 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
215 * so disable compiler instrumentation when FTRACE is enabled.
217 * Currently, the above requirements are honoured by using custom CFLAGS
218 * for alternative.o in kernel/Makefile.
220 void __init
apply_early_boot_alternatives(void)
222 #ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
223 _apply_alternatives((struct alt_entry
*)__alt_start
,
224 (struct alt_entry
*)__alt_end
,
225 RISCV_ALTERNATIVES_EARLY_BOOT
);
229 #ifdef CONFIG_MODULES
230 void apply_module_alternatives(void *start
, size_t length
)
232 _apply_alternatives((struct alt_entry
*)start
,
233 (struct alt_entry
*)(start
+ length
),
234 RISCV_ALTERNATIVES_MODULE
);