1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */
6 #include <linux/kvm_host.h>
7 #include <asm/kvm_mmu.h>
8 #include <asm/kvm_vcpu.h>
9 #include <asm/kvm_eiointc.h>
10 #include <asm/kvm_pch_pic.h>
12 const struct _kvm_stats_desc kvm_vm_stats_desc
[] = {
13 KVM_GENERIC_VM_STATS(),
14 STATS_DESC_ICOUNTER(VM
, pages
),
15 STATS_DESC_ICOUNTER(VM
, hugepages
),
18 const struct kvm_stats_header kvm_vm_stats_header
= {
19 .name_size
= KVM_STATS_NAME_SIZE
,
20 .num_desc
= ARRAY_SIZE(kvm_vm_stats_desc
),
21 .id_offset
= sizeof(struct kvm_stats_header
),
22 .desc_offset
= sizeof(struct kvm_stats_header
) + KVM_STATS_NAME_SIZE
,
23 .data_offset
= sizeof(struct kvm_stats_header
) + KVM_STATS_NAME_SIZE
+
24 sizeof(kvm_vm_stats_desc
),
27 int kvm_arch_init_vm(struct kvm
*kvm
, unsigned long type
)
31 /* Allocate page table to map GPA -> RPA */
32 kvm
->arch
.pgd
= kvm_pgd_alloc();
36 kvm
->arch
.phyid_map
= kvzalloc(sizeof(struct kvm_phyid_map
), GFP_KERNEL_ACCOUNT
);
37 if (!kvm
->arch
.phyid_map
) {
38 free_page((unsigned long)kvm
->arch
.pgd
);
42 spin_lock_init(&kvm
->arch
.phyid_map_lock
);
46 /* Enable all PV features by default */
47 kvm
->arch
.pv_features
= BIT(KVM_FEATURE_IPI
);
48 if (kvm_pvtime_supported())
49 kvm
->arch
.pv_features
|= BIT(KVM_FEATURE_STEAL_TIME
);
51 kvm
->arch
.gpa_size
= BIT(cpu_vabits
- 1);
52 kvm
->arch
.root_level
= CONFIG_PGTABLE_LEVELS
- 1;
53 kvm
->arch
.invalid_ptes
[0] = 0;
54 kvm
->arch
.invalid_ptes
[1] = (unsigned long)invalid_pte_table
;
55 #if CONFIG_PGTABLE_LEVELS > 2
56 kvm
->arch
.invalid_ptes
[2] = (unsigned long)invalid_pmd_table
;
58 #if CONFIG_PGTABLE_LEVELS > 3
59 kvm
->arch
.invalid_ptes
[3] = (unsigned long)invalid_pud_table
;
61 for (i
= 0; i
<= kvm
->arch
.root_level
; i
++)
62 kvm
->arch
.pte_shifts
[i
] = PAGE_SHIFT
+ i
* (PAGE_SHIFT
- 3);
67 void kvm_arch_destroy_vm(struct kvm
*kvm
)
69 kvm_destroy_vcpus(kvm
);
70 free_page((unsigned long)kvm
->arch
.pgd
);
72 kvfree(kvm
->arch
.phyid_map
);
73 kvm
->arch
.phyid_map
= NULL
;
76 int kvm_vm_ioctl_check_extension(struct kvm
*kvm
, long ext
)
83 case KVM_CAP_ENABLE_CAP
:
84 case KVM_CAP_READONLY_MEM
:
85 case KVM_CAP_SYNC_MMU
:
86 case KVM_CAP_IMMEDIATE_EXIT
:
87 case KVM_CAP_IOEVENTFD
:
88 case KVM_CAP_MP_STATE
:
89 case KVM_CAP_SET_GUEST_DEBUG
:
92 case KVM_CAP_NR_VCPUS
:
93 r
= num_online_cpus();
95 case KVM_CAP_MAX_VCPUS
:
98 case KVM_CAP_MAX_VCPU_ID
:
101 case KVM_CAP_NR_MEMSLOTS
:
102 r
= KVM_USER_MEM_SLOTS
;
112 static int kvm_vm_feature_has_attr(struct kvm
*kvm
, struct kvm_device_attr
*attr
)
114 switch (attr
->attr
) {
115 case KVM_LOONGARCH_VM_FEAT_LSX
:
119 case KVM_LOONGARCH_VM_FEAT_LASX
:
123 case KVM_LOONGARCH_VM_FEAT_X86BT
:
127 case KVM_LOONGARCH_VM_FEAT_ARMBT
:
131 case KVM_LOONGARCH_VM_FEAT_MIPSBT
:
132 if (cpu_has_lbt_mips
)
135 case KVM_LOONGARCH_VM_FEAT_PMU
:
139 case KVM_LOONGARCH_VM_FEAT_PV_IPI
:
141 case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME
:
142 if (kvm_pvtime_supported())
150 static int kvm_vm_has_attr(struct kvm
*kvm
, struct kvm_device_attr
*attr
)
152 switch (attr
->group
) {
153 case KVM_LOONGARCH_VM_FEAT_CTRL
:
154 return kvm_vm_feature_has_attr(kvm
, attr
);
160 int kvm_arch_vm_ioctl(struct file
*filp
, unsigned int ioctl
, unsigned long arg
)
162 void __user
*argp
= (void __user
*)arg
;
163 struct kvm
*kvm
= filp
->private_data
;
164 struct kvm_device_attr attr
;
167 case KVM_CREATE_IRQCHIP
:
169 case KVM_HAS_DEVICE_ATTR
:
170 if (copy_from_user(&attr
, argp
, sizeof(attr
)))
173 return kvm_vm_has_attr(kvm
, &attr
);
179 int kvm_vm_ioctl_irq_line(struct kvm
*kvm
, struct kvm_irq_level
*irq_event
, bool line_status
)
181 if (!kvm_arch_irqchip_in_kernel(kvm
))
184 irq_event
->status
= kvm_set_irq(kvm
, KVM_USERSPACE_IRQ_SOURCE_ID
,
185 irq_event
->irq
, irq_event
->level
, line_status
);
190 bool kvm_arch_irqchip_in_kernel(struct kvm
*kvm
)
192 return (kvm
->arch
.ipi
&& kvm
->arch
.eiointc
&& kvm
->arch
.pch_pic
);