arch/arm64/include/asm/kvm_pkvm.h
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 - Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__

#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>

/* Maximum number of VMs that can co-exist under pKVM. */
#define KVM_MAX_PVMS 255

#define HYP_MEMBLOCK_REGIONS 128

int pkvm_init_host_vm(struct kvm *kvm);
int pkvm_create_hyp_vm(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);

extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);

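/*
 * hyp_memory above is the hypervisor's private copy of the host memblock
 * map, populated early during KVM initialisation (capped at
 * HYP_MEMBLOCK_REGIONS entries), so that the sizing helpers below can walk
 * the memory layout before the hypervisor takes ownership of it.
 */
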
static inline unsigned long
hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
{
	unsigned long nr_pages = reg->size >> PAGE_SHIFT;
	unsigned long start, end;

	start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
	end = start + nr_pages * vmemmap_entry_size;
	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = ALIGN(end, PAGE_SIZE);

	return end - start;
}

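/*
 * Worked example for hyp_vmemmap_memblock_size() above (illustrative
 * assumptions: 4 KiB pages, 8-byte vmemmap entries): a 1 GiB region at
 * base 0x80000000 spans 262144 pages, so its vmemmap slice is
 * 262144 * 8 = 2 MiB, starting at byte offset (0x80000000 >> 12) * 8
 * = 4 MiB; both ends are already page-aligned, so 2 MiB is returned.
 */
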
static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
{
	unsigned long res = 0, i;

	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
						 vmemmap_entry_size);
	}

	return res >> PAGE_SHIFT;
}

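/*
 * hyp_vmemmap_pages() above sums the per-region byte counts and converts
 * them to a page count; holes between memblock regions are skipped, so
 * sparse physical layouts don't inflate the vmemmap donation.
 */
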
static inline unsigned long hyp_vm_table_pages(void)
{
	return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
}

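/*
 * Illustrative arithmetic for hyp_vm_table_pages() above: with 64-bit
 * pointers, 255 * 8 = 2040 bytes, which PAGE_ALIGN rounds up to a single
 * page on a 4 KiB-granule kernel.
 */
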
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int i;

	/* Provision the worst case scenario */
	for (i = KVM_PGTABLE_FIRST_LEVEL; i <= KVM_PGTABLE_LAST_LEVEL; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}

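/*
 * Worked example for __hyp_pgtable_max_pages() above (illustrative
 * assumptions: 4 KiB granule, PTRS_PER_PTE = 512, four levels): mapping
 * 1 GiB (262144 pages) needs at most 512 leaf-level tables, then 1 table
 * at each of the three levels above, i.e. 515 pages in total. Each loop
 * iteration divides by the per-table fan-out to size the next level up.
 */
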
static inline unsigned long __hyp_pgtable_total_pages(void)
{
	unsigned long res = 0, i;

	/* Cover all of memory with page-granularity */
	for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
		struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
		res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
	}

	return res;
}

static inline unsigned long hyp_s1_pgtable_pages(void)
{
	unsigned long res;

	res = __hyp_pgtable_total_pages();

	/* Allow 1 GiB for private mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

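/*
 * The 1 GiB of headroom above covers hypervisor stage-1 mappings that
 * don't correspond to memblock memory, such as per-CPU stacks, the
 * vmemmap and other private VA allocations.
 */
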
static inline unsigned long host_s2_pgtable_pages(void)
{
	unsigned long res;

	/*
	 * Include an extra 16 pages to safely upper-bound the worst case of
	 * concatenated pgds.
	 */
	res = __hyp_pgtable_total_pages() + 16;

	/* Allow 1 GiB for MMIO mappings */
	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);

	return res;
}

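/*
 * The +16 above comes from the architecture: a stage-2 translation may
 * concatenate up to 16 tables at the initial lookup level instead of
 * adding another level, so 16 pages safely bounds the start-level pgd.
 */
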
#define KVM_FFA_MBOX_NR_PAGES	1

static inline unsigned long hyp_ffa_proxy_pages(void)
{
	size_t desc_max;

	/*
	 * The hypervisor FFA proxy needs enough memory to buffer a fragmented
	 * descriptor returned from EL3 in response to a RETRIEVE_REQ call.
	 */
	desc_max = sizeof(struct ffa_mem_region) +
		   sizeof(struct ffa_mem_region_attributes) +
		   sizeof(struct ffa_composite_mem_region) +
		   SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);

	/* Plus a page each for the hypervisor's RX and TX mailboxes. */
	return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}

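/*
 * Rough arithmetic for hyp_ffa_proxy_pages() above (illustrative sizes):
 * SG_MAX_SEGMENTS is 2048 and each ffa_mem_region_addr_range is 16 bytes,
 * so the address-range array alone is 32 KiB; with the fixed headers and
 * 4 KiB pages the descriptor buffer rounds up to 9 pages, on top of the
 * 2 mailbox pages.
 */
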
static inline size_t pkvm_host_sve_state_size(void)
{
	if (!system_supports_sve())
		return 0;

	return size_add(sizeof(struct cpu_sve_state),
			SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}

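/*
 * SVE_SIG_REGS_SIZE() scales with the vector length: at the maximum
 * architectural VL of 2048 bits (vq = 16), the 32 Z registers alone need
 * 32 * 256 = 8 KiB, plus the predicate and FFR registers, so the buffer
 * above is sized for the largest VL the host supports.
 */
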
#endif	/* __ARM64_KVM_PKVM_H__ */