// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#include <linux/compiler.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR		0x180000
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}
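
/*
 * Index of the table entry that maps gva at each translation level.
 * Each level resolves page_shift - 3 bits of the virtual address;
 * the top level (pgd) resolves whatever remains of va_bits.
 */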
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}
static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}
static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}
static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}
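
/*
 * Mask a table or page descriptor down to its output address,
 * dropping the low attribute bits and anything above the VA range.
 */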
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
	return entry & mask;
}
static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}
static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}
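
/*
 * Allocate guest physical pages for the VM's stage-1 PGD, unless it
 * has already been set up.
 */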
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}
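
/*
 * Map the page at vaddr to paddr in the VM's stage-1 page tables,
 * allocating intermediate table pages as needed.  The low three bits
 * of flags select the MAIR_EL1 attribute index used for the mapping.
 */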
void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		  uint32_t pgd_memslot, uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep) {
		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		*ptep |= 3;
	}

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep) {
			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
			*ptep |= 3;
		}
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep) {
			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
			*ptep |= 3;
		}
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;
	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		 uint32_t pgd_memslot)
{
	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

	_virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
}
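
/*
 * Translate a guest virtual address to a guest physical address by
 * walking the VM's stage-1 page tables from the host.  Fails the test
 * if the address is not mapped.
 */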
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(1);
}
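
/*
 * Dump the guest's stage-1 page tables to stream for debugging.  The
 * per-level dump in pte_dump() is compiled in only when DEBUG is defined.
 */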
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}
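
/*
 * Initialize the vCPU with KVM_ARM_VCPU_INIT (falling back to the
 * KVM_ARM_PREFERRED_TARGET when no target is given), enable FP/ASIMD,
 * and program SCTLR_EL1, TCR_EL1, MAIR_EL1 and TTBR0_EL1 so the guest
 * runs with the MMU enabled on the VM's stage-1 page tables.
 */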
void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	uint64_t sctlr_el1, tcr_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);

	get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
	get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);

	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with 52-bit physical address ranges");
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P48V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
}
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	uint64_t pstate, pc;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}
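
/*
 * Add a vCPU to the VM with the given init parameters, give it a
 * default-sized guest stack, and point its PC at guest_code.
 */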
void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
			      struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);

	vm_vcpu_add(vm, vcpuid);
	aarch64_vcpu_setup(vm, vcpuid, init);

	set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}
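
/*
 * Pass up to eight guest function arguments to the vCPU by loading
 * them into registers x0 through x7.
 */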
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
			va_arg(ap, uint64_t));
	}

	va_end(ap);
}
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
}