// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "guest_modes.h"
#include "kvm_util.h"
#include "processor.h"
#include "ucall_common.h"

#include <linux/bitfield.h>
#include <linux/sizes.h>

#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

static vm_vaddr_t exception_handlers;

static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}

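/*
 * Worked example (illustrative): with 4K pages, page_align() rounds up to a
 * page boundary, and bumps an already-aligned value by a whole page:
 *
 *	page_align(vm, 1)    == 0x1000
 *	page_align(vm, 4095) == 0x1000
 *	page_align(vm, 4096) == 0x2000	(aligned input still advances)
 *
 * The only caller in this file rounds a page table size up, so the
 * occasional extra page is harmless.
 */
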
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

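/*
 * Worked example (illustrative): for a 4K-granule, 4-level, 48-bit VA
 * configuration, page_shift = 12 and pgtable_levels = 4, so
 *
 *	shift = (4 - 1) * (12 - 3) + 12 = 39
 *	mask  = (1UL << (48 - 39)) - 1 = 0x1ff
 *
 * i.e. pgd_index() selects VA bits [47:39], the standard top-level index.
 */
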
static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		    "Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		    "Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	return (gva >> vm->page_shift) & mask;
}

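/*
 * Continuing the 4K/48-bit example above (illustrative): each lower level
 * consumes page_shift - 3 = 9 VA bits, so the four indices cover
 *
 *	pgd_index: VA[47:39]	pud_index: VA[38:30]
 *	pmd_index: VA[29:21]	pte_index: VA[20:12]
 */
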
static inline bool use_lpa2_pte_format(struct kvm_vm *vm)
{
	return (vm->page_size == SZ_4K || vm->page_size == SZ_16K) &&
	       (vm->pa_bits > 48 || vm->va_bits > 48);
}

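/*
 * For example (illustrative), VM_MODE_P52V48_4K and VM_MODE_P52V48_16K take
 * the LPA2 branch in addr_pte()/pte_addr() below, while VM_MODE_P52V48_64K
 * keeps the pre-LPA2 (FEAT_LPA) layout in which PA[51:48] are carried in
 * PTE bits [15:12].
 */
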
static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs)
{
	uint64_t pte;

	if (use_lpa2_pte_format(vm)) {
		pte = pa & GENMASK(49, vm->page_shift);
		pte |= FIELD_GET(GENMASK(51, 50), pa) << 8;
		attrs &= ~GENMASK(9, 8);
	} else {
		pte = pa & GENMASK(47, vm->page_shift);
		if (vm->page_shift == 16)
			pte |= FIELD_GET(GENMASK(51, 48), pa) << 12;
	}

	pte |= attrs;

	return pte;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte)
{
	uint64_t pa;

	if (use_lpa2_pte_format(vm)) {
		pa = pte & GENMASK(49, vm->page_shift);
		pa |= FIELD_GET(GENMASK(9, 8), pte) << 50;
	} else {
		pa = pte & GENMASK(47, vm->page_shift);
		if (vm->page_shift == 16)
			pa |= FIELD_GET(GENMASK(15, 12), pte) << 48;
	}

	return pa;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;

	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	size_t nr_pages = page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size;

	if (vm->pgd_created)
		return;

	vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
				     KVM_GUEST_PAGE_TABLE_MIN_PADDR,
				     vm->memslots[MEM_REGION_PT]);
	vm->pgd_created = true;
}

static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
			 uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		    "Virtual address not on page boundary,\n"
		    "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		    (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		    "Physical address not on page boundary,\n"
		    "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);

	/* Walk down from the PGD, allocating missing intermediate tables. */
	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep)
		*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep)
			*ptep = addr_pte(vm, vm_alloc_page_table(vm), 3);
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3);  /* AF */
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t attr_idx = MT_NORMAL;

	_virt_pg_map(vm, vaddr, paddr, attr_idx);
}

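/*
 * Usage sketch (illustrative, assumes the generic virt_pg_map() wrapper from
 * kvm_util): identity-map one page of guest-physical memory as MT_NORMAL.
 *
 *	vm_paddr_t gpa = 0x10000000;
 *
 *	virt_pg_map(vm, gpa, gpa);	// gva == gpa, normal WB memory
 */
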
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return ptep;

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep = virt_get_pte_hva(vm, gva);

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
}

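/*
 * Usage sketch (illustrative): a host-side test can translate a guest
 * virtual address and then poke the backing memory directly.
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);
 *	vm_paddr_t gpa = addr_gva2gpa(vm, gva);	// wraps addr_arch_gva2gpa()
 *	uint64_t *hva = addr_gpa2hva(vm, gpa);
 *
 *	*hva = 0xdeadbeef;			// visible to the guest at gva
 */
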
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
		     uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "",
			type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd,
			*ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	struct kvm_vm *vm = vcpu->vm;
	uint64_t sctlr_el1, tcr_el1, ttbr0_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);

	/* Configure base granule size */
	switch (vm->mode) {
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
	case VM_MODE_P48V48_64K:
	case VM_MODE_P40V48_64K:
	case VM_MODE_P36V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		break;
	case VM_MODE_P52V48_16K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
		break;
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
	case VM_MODE_P36V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift);

	/* Configure output size */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P52V48_16K:
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2;
		break;
	case VM_MODE_P48V48_4K:
	case VM_MODE_P48V48_16K:
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
	case VM_MODE_P40V48_16K:
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P36V48_4K:
	case VM_MODE_P36V48_16K:
	case VM_MODE_P36V48_64K:
	case VM_MODE_P36V47_16K:
		tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
	if (use_lpa2_pte_format(vm))
		tcr_el1 |= (1ul << 59) /* DS */;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id);
}

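/*
 * Worked example (illustrative): for VM_MODE_P48V48_4K the switches above
 * yield TG0 = 0 (4K granule), IPS = 5 (48-bit output) and
 * T0SZ = 64 - 48 = 16, with IRGN0/ORGN0 = WBWA and SH0 = Inner Shareable:
 *
 *	0x10 (T0SZ) | 0x100 (IRGN0) | 0x400 (ORGN0) | 0x3000 (SH0) = 0x3510
 *
 * so the low word of TCR_EL1 becomes 0x3510 and bits [34:32] hold IPS.
 */
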
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	uint64_t pstate, pc;

	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
	vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   struct kvm_vcpu_init *init)
{
	size_t stack_size;
	uint64_t stack_vaddr;
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
					     vm->page_size;
	stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
				       DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
				       MEM_REGION_DATA);

	aarch64_vcpu_setup(vcpu, init);

	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);

	return vcpu;
}

struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code)
{
	struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
	return __aarch64_vcpu_add(vm, vcpu_id, NULL);
}

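/*
 * Usage sketch (illustrative, assumes the generic kvm_util helpers): most
 * tests never call these directly; vm_create_with_one_vcpu() reaches here
 * through vm_arch_vcpu_add().
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *	vcpu_run(vcpu);
 */
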
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]),
			     va_arg(ap, uint64_t));
	}

	va_end(ap);
}

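/*
 * Usage sketch (illustrative): arguments land in x0-x7 per the AArch64
 * procedure call standard and arrive as the guest function's parameters.
 *
 *	static void guest_code(uint64_t a, uint64_t b) { ... }
 *
 *	vcpu_args_set(vcpu, 2, 0x1234, 0x5678);	// x0 = 0x1234, x1 = 0x5678
 */
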
void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec)
{
	ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec);

	while (1)
		;
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED)
		return;

	if (uc.args[2]) /* valid_ec */ {
		assert(VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)",
			  uc.args[0], uc.args[1]);
	} else {
		assert(!VECTOR_IS_SYNC(uc.args[0]));
		TEST_FAIL("Unexpected exception (vector:0x%lx)",
			  uc.args[0]);
	}
}

struct handlers {
	handler_fn exception_handlers[VECTOR_NUM][ESR_ELx_EC_MAX + 1];
};

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	extern char vectors;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}

void route_exception(struct ex_regs *regs, int vector)
{
	struct handlers *handlers = (struct handlers *)exception_handlers;
	bool valid_ec;
	int ec = 0;

	switch (vector) {
	case VECTOR_SYNC_CURRENT:
	case VECTOR_SYNC_LOWER_64:
		ec = ESR_ELx_EC(read_sysreg(esr_el1));
		valid_ec = true;
		break;
	case VECTOR_IRQ_CURRENT:
	case VECTOR_IRQ_LOWER_64:
	case VECTOR_FIQ_CURRENT:
	case VECTOR_FIQ_LOWER_64:
	case VECTOR_ERROR_CURRENT:
	case VECTOR_ERROR_LOWER_64:
		valid_ec = false;
		break;
	default:
		valid_ec = false;
		goto unexpected_exception;
	}

	if (handlers && handlers->exception_handlers[vector][ec])
		return handlers->exception_handlers[vector][ec](regs);

unexpected_exception:
	kvm_exit_unexpected_exception(vector, ec, valid_ec);
}

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
					vm->page_size, MEM_REGION_DATA);

	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec,
			     void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	assert(ec <= ESR_ELx_EC_MAX);
	handlers->exception_handlers[vector][ec] = handler;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	struct handlers *handlers = addr_gva2hva(vm, vm->handlers);

	assert(!VECTOR_IS_SYNC(vector));
	assert(vector < VECTOR_NUM);
	handlers->exception_handlers[vector][0] = handler;
}

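/*
 * Usage sketch (illustrative; guest_brk_handler is hypothetical): catch BRK
 * exceptions in the guest instead of failing the test.
 *
 *	static void guest_brk_handler(struct ex_regs *regs)
 *	{
 *		regs->pc += 4;	// skip the brk instruction
 *	}
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vcpu);
 *	vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ESR_ELx_EC_BRK64,
 *				guest_brk_handler);
 */
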
uint32_t guest_get_vcpuid(void)
{
	return read_sysreg(tpidr_el1);
}

static uint32_t max_ipa_for_page_size(uint32_t vm_ipa, uint32_t gran,
				      uint32_t not_sup_val, uint32_t ipa52_min_val)
{
	if (gran == not_sup_val)
		return 0;
	else if (gran >= ipa52_min_val && vm_ipa >= 52)
		return 52;
	else
		return min(vm_ipa, 48U);
}

void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
				      uint32_t *ipa16k, uint32_t *ipa64k)
{
	struct kvm_vcpu_init preferred_init;
	int kvm_fd, vm_fd, vcpu_fd, err;
	uint64_t val;
	uint32_t gran;
	struct kvm_one_reg reg = {
		.id	= KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
		.addr	= (uint64_t)&val,
	};

	kvm_fd = open_kvm_dev_path_or_exit();
	vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa);
	TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd));

	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
	TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd));

	err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err));
	err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err));

	err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd));

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val);
	*ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI,
				       ID_AA64MMFR0_EL1_TGRAN4_52_BIT);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val);
	*ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI,
					ID_AA64MMFR0_EL1_TGRAN64_IMP);

	gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val);
	*ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI,
					ID_AA64MMFR0_EL1_TGRAN16_52_BIT);

	close(vcpu_fd);
	close(vm_fd);
	close(kvm_fd);
}

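/*
 * Usage sketch (illustrative): probe the supported granules for the host's
 * maximum IPA size and skip a test when one is unavailable.
 *
 *	uint32_t ipa4k, ipa16k, ipa64k;
 *
 *	aarch64_get_supported_page_sizes(kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE),
 *					 &ipa4k, &ipa16k, &ipa64k);
 *	if (!ipa64k)
 *		print_skip("64K pages not supported");
 */
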
#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5,	\
		     arg6, res)							\
	asm volatile("mov   w0, %w[function_id]\n"				\
		     "mov   x1, %[arg0]\n"					\
		     "mov   x2, %[arg1]\n"					\
		     "mov   x3, %[arg2]\n"					\
		     "mov   x4, %[arg3]\n"					\
		     "mov   x5, %[arg4]\n"					\
		     "mov   x6, %[arg5]\n"					\
		     "mov   x7, %[arg6]\n"					\
		     #insn "#0\n"						\
		     "mov   %[res0], x0\n"					\
		     "mov   %[res1], x1\n"					\
		     "mov   %[res2], x2\n"					\
		     "mov   %[res3], x3\n"					\
		     : [res0] "=r"(res->a0), [res1] "=r"(res->a1),		\
		       [res2] "=r"(res->a2), [res3] "=r"(res->a3)		\
		     : [function_id] "r"(function_id), [arg0] "r"(arg0),	\
		       [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3),	\
		       [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6)	\
		     : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7")

void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res)
{
	__smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5,
		     arg6, res);
}

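/*
 * Usage sketch (illustrative): guest code can query the PSCI version via an
 * SMCCC HVC call (PSCI_0_2_FN_PSCI_VERSION comes from linux/psci.h,
 * SMCCC_RET_NOT_SUPPORTED from linux/arm-smccc.h).
 *
 *	struct arm_smccc_res res;
 *
 *	smccc_hvc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
 *	GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED);
 */
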
void kvm_selftest_arch_init(void)
{
	/*
	 * arm64 doesn't have a true default mode, so start by computing the
	 * available IPA space and page sizes early.
	 */
	guest_modes_append_default();
}

void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
{
	/*
	 * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space
	 * is [0, 2^(64 - TCR_EL1.T0SZ)).
	 */
	sparsebit_set_num(vm->vpages_valid, 0,
			  (1ULL << vm->va_bits) >> vm->page_shift);
}

/* Helper to call wfi instruction. */