arch/arm64/kvm/va_layout.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
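/*
 * Worked example (illustrative values only): with vabits_actual == 48
 * and a linear map whose start and end addresses differ only in bits
 * [37:0], fls64() below yields tag_lsb == 38, so:
 *
 *   va_mask == GENMASK_ULL(37, 0)  (the kernel linear VA bits we keep)
 *   tag_val == (hyp_va_msb | random bits [46:38]) >> 38
 *
 * and kern_hyp_va() then computes (va & va_mask) | (tag_val << tag_lsb).
 */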
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Where is my RAM region? */
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));
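
	/*
	 * tag_lsb is now one above the most significant bit that can
	 * differ between two linear-map addresses: bits [tag_lsb - 1:0]
	 * identify any linear VA, and everything above them is free to
	 * carry the tag.
	 */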
	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
	tag_val >>= tag_lsb;
}

static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}
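
/*
 * A minimal C sketch (illustration only, never called by the kernel) of
 * the net effect of the five instructions generated above. The first ROR
 * parks the kernel VA bits at the top of the register, which is why
 * tag_val is stored pre-shifted right by tag_lsb: its low 24 bits can
 * then be inserted with two 12-bit ADD immediates before the second ROR
 * rotates everything back into place.
 */
static __maybe_unused u64 sketch_kern_hyp_va(u64 va)
{
	va &= va_mask;				/* AND: keep the linear VA bits */
	va = ror64(va, tag_lsb);		/* ROR: tag field moves to bit 0 */
	va += tag_val & GENMASK_ULL(11, 0);	/* ADD: low 12 bits of the tag */
	va += tag_val & GENMASK_ULL(23, 12);	/* ADD: next 12 bits of the tag */
	return ror64(va, 64 - tag_lsb);		/* ROR: tag lands at tag_lsb */
}
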
void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);
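
	/*
	 * The five placeholder instructions patched here are the
	 * ALTERNATIVE_CB() template emitted by kern_hyp_va() in
	 * asm/kvm_mmu.h; each slot is rewritten with the corresponding
	 * instruction from compute_instruction().
	 */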
	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if the tag is zero (because the layout
		 * dictates it and we don't have any spare bits in the
		 * address), NOP everything after masking the kernel VA.
		 */
		if (has_vhe() || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

void *__kvm_bp_vect_base;
int __kvm_harden_el2_vector_slot;

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 5);

	if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
		return;
	}
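
	/*
	 * The guard above bails out when there is nothing to patch (the
	 * capability isn't set); the WARN fires only in the unexpected
	 * combination of VHE with ARM64_HARDEN_EL2_VECTORS set.
	 */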
	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
	addr &= va_mask;
	addr |= tag_val << tag_lsb;

	/* Use PC[10:7] to branch to the same vector in KVM */
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));
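
	/*
	 * Each of the 16 exception vectors is 0x80 bytes long, so bits
	 * [10:7] of the faulting PC select the matching entry in the
	 * 2KB vector table.
	 */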
	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;
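
	/*
	 * The five instructions below are generated in order: an stp that
	 * replays the preamble's stack save, a movz/movk/movk triple that
	 * materialises the 48 significant bits of addr in x0 (16 bits per
	 * instruction, at shifts 0, 16 and 32), and a br to jump there.
	 */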
	/* stp x0, x1, [sp, #-16]! */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}