// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

/*
 * The LSB of the random hyp VA tag or 0 if no randomization is used.
 */
static u8 tag_lsb;
/*
 * The random hyp VA tag value with the region bit if hyp randomization is used
 */
static u64 tag_val;
static u64 va_mask;

static void compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;
	int kva_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	if (kva_msb == (vabits_actual - 1)) {
		/*
		 * No space in the address, let's compute the mask so
		 * that it covers (vabits_actual - 1) bits, and the region
		 * bit. The tag stays set to zero.
		 */
		va_mask  = BIT(vabits_actual - 1) - 1;
		va_mask |= hyp_va_msb;
	} else {
		/*
		 * We do have some free bits to insert a random tag.
		 * Hyp VAs are now created from kernel linear map VAs
		 * using the following formula (with V == vabits_actual):
		 *
		 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
		 *  ---------------------------------------------------------
		 * | 0000000 | hyp_va_msb |    random tag  |  kern linear VA |
		 */
		tag_lsb = kva_msb;
		va_mask = GENMASK_ULL(tag_lsb - 1, 0);
		tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
		tag_val |= hyp_va_msb;
		tag_val >>= tag_lsb;
	}
}
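
/*
 * Worked example (illustrative numbers, not a fixed layout): with
 * vabits_actual == 48 and kva_msb == 40, compute_layout() above ends up
 * with tag_lsb == 40, va_mask covering bits [39:0], the random tag in
 * bits [46:40] and hyp_va_msb in bit 47. Note that tag_val is stored
 * pre-shifted right by tag_lsb, which is what lets compute_instruction()
 * below split it into two 12-bit ADD immediates.
 */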

static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}
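
/*
 * The five instructions patched below implement kern_hyp_va(). In the
 * randomized case the sequence is roughly equivalent to (with x0 standing
 * in for whatever register the original alternative operates on):
 *
 *	and	x0, x0, #va_mask		// keep the low tag_lsb bits
 *	ror	x0, x0, #tag_lsb		// rotate the tag area down
 *	add	x0, x0, #(tag_val & 0xfff)	// insert the low 12 tag bits
 *	add	x0, x0, #(tag_val & 0xfff000)	// insert the next 12 tag bits
 *	ror	x0, x0, #(64 - tag_lsb)		// rotate everything back
 *
 * When there is no room for a tag, only the AND is kept and the rest is
 * NOPed; on VHE the whole sequence is NOPed.
 */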

void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	if (!has_vhe() && !va_mask)
		compute_layout();

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if we don't have any spare bits in
		 * the address, NOP everything after masking that
		 * kernel VA.
		 */
		if (has_vhe() || (!tag_lsb && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

void *__kvm_bp_vect_base;
int __kvm_harden_el2_vector_slot;
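
/*
 * kvm_patch_vector_branch() rewrites its five-instruction alternative
 * into (roughly) the following, where "addr" is the hyp VA of the
 * matching vector entry plus KVM_VECTOR_PREAMBLE:
 *
 *	stp	x0, x1, [sp, #-16]!
 *	movz	x0, #(addr & 0xffff)
 *	movk	x0, #((addr >> 16) & 0xffff), lsl #16
 *	movk	x0, #((addr >> 32) & 0xffff), lsl #32
 *	br	x0
 */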

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 5);

	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
		return;
	}

	if (!va_mask)
		compute_layout();

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
	addr &= va_mask;
	addr |= tag_val << tag_lsb;

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* stp x0, x1, [sp, #-16]! */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}