// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>
/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * Compute HYP VA by using the same computation as kern_hyp_va().
 */
static u64 __early_kern_hyp_va(u64 addr)
{
	addr &= va_mask;
	addr |= tag_val << tag_lsb;
	return addr;
}
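
/*
 * Illustration (hypothetical values): with tag_lsb == 38 and tag_val == 0x2,
 * a kernel linear map VA keeps its low 38 bits (va_mask == GENMASK_ULL(37, 0))
 * and gets 0x2 << 38 OR-ed in as the hyp tag.
 */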

/*
 * Store a hyp VA <-> PA offset into a hyp-owned variable.
 */
static void init_hyp_physvirt_offset(void)
{
	extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
	u64 kern_va, hyp_va;

	/* Compute the offset from the hyp VA and PA of a random symbol. */
	kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
	hyp_va = __early_kern_hyp_va(kern_va);
	CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
}
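
/*
 * Note: hyp_physvirt_offset is read by the nVHE hyp code so that it can
 * convert between its own VAs and PAs with a simple offset.
 */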

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
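
/*
 * Note: tag_lsb ends up one above the most significant bit that differs
 * between the lowest and highest linear map addresses, so the kernel linear
 * VA fits entirely in bits [tag_lsb - 1, 0] and everything above is free to
 * carry the hyp tag.
 */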
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
	tag_val >>= tag_lsb;

	init_hyp_physvirt_offset();
}
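
/*
 * Generate instruction @n of the five-instruction kern_hyp_va() sequence:
 * AND with va_mask, ROR down by tag_lsb, two ADDs that insert tag_val
 * twelve bits at a time, and a final ROR that rotates the tag back into
 * place.
 */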
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}
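
/*
 * Alternative callback: rewrite the five kern_hyp_va() instructions at patch
 * time, or NOP them out when no translation (VHE) or no tag insertion is
 * needed.
 */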
void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if the tag is zero (because the layout
		 * dictates it and we don't have any spare bits in the
		 * address), NOP everything after masking the kernel VA.
		 */
		if (has_vhe() || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}
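
/*
 * Alternative callback: replace the four patched instructions with a
 * movz/movk/movk sequence that builds the hyp VA of the matching vector
 * slot, followed by a br to it.
 */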
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 4);

	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
		return;

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}
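
/*
 * Emit a movz/movk/movk/movk sequence that materializes the 64-bit constant
 * @val in the register named by the first original instruction.
 */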
static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u32 insn, oinsn, rd;

	BUG_ON(nr_inst != 4);

	/* Compute target register */
	oinsn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);

	/* movz rd, #(val & 0xffff) */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)val,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 48),
					 48,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);
}
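
/*
 * Materialize (kimage_voffset + PHYS_OFFSET) in the register targeted by
 * the patched site.
 */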
void kvm_update_kimg_phys_offset(struct alt_instr *alt,
				 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
}
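
/*
 * Materialize kimage_voffset (the kernel image VA/PA offset) in the target
 * register.
 */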
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}