/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#define KVM_LINEAR_RMA		0
#define KVM_LINEAR_HPT		1
static void __init kvm_linear_init_one(ulong size, int count, int type);
static struct kvmppc_linear_info *kvm_alloc_linear(int type);
static void kvm_release_linear(struct kvmppc_linear_info *ri);

int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER;
EXPORT_SYMBOL_GPL(kvm_hpt_order);
/*************** RMA *************/

/*
 * This maintains a list of RMAs (real mode areas) for KVM guests to use.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot for KVM to use.
 */
static unsigned long kvm_rma_size = 64 << 20;	/* 64MB */
static unsigned long kvm_rma_count;
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
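/*
 * Worked example: with the default kvm_rma_size of 64 MB above,
 * lpcr_rmls(64ul << 20) returns 3; the HV setup code elsewhere places
 * this value in the RMLS field of the guest's LPCR.
 */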
static int __init early_parse_rma_size(char *p)
{
	if (!p)
		return 1;

	kvm_rma_size = memparse(p, &p);

	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
static int __init early_parse_rma_count(char *p)
{
	if (!p)
		return 1;

	kvm_rma_count = simple_strtoul(p, NULL, 0);

	return 0;
}
early_param("kvm_rma_count", early_parse_rma_count);
struct kvmppc_linear_info *kvm_alloc_rma(void)
{
	return kvm_alloc_linear(KVM_LINEAR_RMA);
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

void kvm_release_rma(struct kvmppc_linear_info *ri)
{
	kvm_release_linear(ri);
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
/*************** HPT *************/

/*
 * This maintains a list of big linear HPT tables that contain the GVA->HPA
 * memory mappings. If we don't reserve those early on, we might not be able
 * to get a big (usually 16MB) linear memory region from the kernel anymore.
 */

static unsigned long kvm_hpt_count;
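/*
 * Size note (assumption: KVM_DEFAULT_HPT_ORDER is 24 in this era of the
 * code): kvm_linear_init() below allocates 1 << kvm_hpt_order bytes per
 * HPT, i.e. 1 << 24 = 16 MB, matching the "usually 16MB" above.
 */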
static int __init early_parse_hpt_count(char *p)
{
	if (!p)
		return 1;

	kvm_hpt_count = simple_strtoul(p, NULL, 0);

	return 0;
}
early_param("kvm_hpt_count", early_parse_hpt_count);
struct kvmppc_linear_info *kvm_alloc_hpt(void)
{
	return kvm_alloc_linear(KVM_LINEAR_HPT);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct kvmppc_linear_info *li)
{
	kvm_release_linear(li);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
/*************** generic *************/

static LIST_HEAD(free_linears);
static DEFINE_SPINLOCK(linear_lock);
static void __init kvm_linear_init_one(ulong size, int count, int type)
{
	unsigned long i;
	unsigned long j, npages;
	void *linear;
	struct page *pg;
	const char *typestr;
	struct kvmppc_linear_info *linear_info;

	if (!count)
		return;

	typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";

	npages = size >> PAGE_SHIFT;
	linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
	for (i = 0; i < count; ++i) {
		linear = alloc_bootmem_align(size, size);
		pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
			 size >> 20);
		linear_info[i].base_virt = linear;
		linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
		linear_info[i].npages = npages;
		linear_info[i].type = type;
		list_add_tail(&linear_info[i].list, &free_linears);
		atomic_set(&linear_info[i].use_count, 0);

		/* Bump the refcount on every backing page so the region
		 * is pinned and never freed back to the page allocator. */
		pg = pfn_to_page(linear_info[i].base_pfn);
		for (j = 0; j < npages; ++j) {
			atomic_inc(&pg->_count);
			++pg;
		}
	}
}
static struct kvmppc_linear_info *kvm_alloc_linear(int type)
{
	struct kvmppc_linear_info *ri, *ret;

	ret = NULL;
	spin_lock(&linear_lock);
	list_for_each_entry(ri, &free_linears, list) {
		if (ri->type != type)
			continue;

		list_del(&ri->list);
		atomic_inc(&ri->use_count);
		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
		ret = ri;
		break;
	}
	spin_unlock(&linear_lock);
	return ret;
}
static void kvm_release_linear(struct kvmppc_linear_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		spin_lock(&linear_lock);
		list_add_tail(&ri->list, &free_linears);
		spin_unlock(&linear_lock);
	}
}
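/*
 * Lifecycle note: kvm_alloc_linear() unlinks an entry from free_linears
 * and raises use_count 0 -> 1; kvm_release_linear() requeues it once the
 * count drops back to 0.  The bootmem backing is never freed, only
 * recycled through this free list.
 */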
/*
 * Called at boot time while the bootmem allocator is active,
 * to allocate contiguous physical memory for the hash page
 * tables for guests.
 */
void __init kvm_linear_init(void)
{
	/* HPT */
	kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT);
	/* RMA */
	/* Only do this on PPC970 in HV mode */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return;

	if (!kvm_rma_size || !kvm_rma_count)
		return;

	/* Check that the requested size is one supported in hardware */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return;
	}

	kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
}