/* arch/powerpc/kvm/book3s_hv_builtin.c */
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#include "book3s_hv_cma.h"
/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
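/*
 * Arithmetic note (illustrative): (1 << 18) bytes is 256KB, and the
 * shift by PAGE_SHIFT converts bytes to pages, so HPT_ALIGN_PAGES is
 * 64 pages with 4KB pages (PAGE_SHIFT == 12) and 4 pages with 64KB
 * pages (PAGE_SHIFT == 16).
 */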
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;
/*
 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot using CMA.
 * The RMA size should be a power of 2.
 */
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);
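/*
 * Arithmetic note (illustrative): (1 << 27) bytes is 128MB, so the
 * default kvm_rma_pages is 32768 pages with 4KB pages and 2048 pages
 * with 64KB pages.
 */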
/*
 * Work out the RMLS (real mode limit selector) field value for a
 * given RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
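/*
 * Usage sketch (illustrative, not part of this file): a caller
 * building a guest LPCR image might fold the RMLS value in as below.
 * This assumes the LPCR_RMLS/LPCR_RMLS_SH definitions from
 * <asm/reg.h>; "lpcr" is a hypothetical caller-local variable.
 *
 *	int rmls = lpcr_rmls(kvm_rma_pages << PAGE_SHIFT);
 *	if (rmls >= 0)
 *		lpcr = (lpcr & ~LPCR_RMLS) |
 *		       ((unsigned long)rmls << LPCR_RMLS_SH);
 */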
static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
	/*
	 * Check that the requested size is one supported in hardware.
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
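/*
 * Example: booting with "kvm_rma_size=256M" on the kernel command
 * line requests 256MB RMAs.  memparse() accepts the usual K/M/G
 * suffixes, and lpcr_rmls() rejects any size the hardware cannot map.
 */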
struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
 err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);
void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
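/*
 * Typical pairing (sketch): a caller takes an RMA at VM setup and
 * drops its reference on teardown.
 *
 *	struct kvm_rma_info *ri = kvm_alloc_rma();
 *	if (!ri)
 *		return -ENOMEM;
 *	...
 *	kvm_release_rma(ri);
 */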
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
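/*
 * Example: "kvm_cma_resv_ratio=10" on the kernel command line
 * reserves 10% of memory for the KVM CMA area instead of the
 * default 5%.
 */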
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	/* Old CPUs require HPT aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return kvm_alloc_cma(nr_pages, align_pages);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	kvm_release_cma(page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
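/*
 * Usage sketch (illustrative): allocating a 16MB hash page table and
 * releasing it again.  Note that nr_pages is a page count, not bytes.
 *
 *	unsigned long nr_pages = (1ul << 24) >> PAGE_SHIFT;
 *	struct page *hpt = kvm_alloc_hpt(nr_pages);
 *	if (hpt)
 *		kvm_release_hpt(hpt, nr_pages);
 */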
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator.  It should be
 * called by arch-specific code once the early allocator (memblock or
 * bootmem) has been activated and all other subsystems have already
 * allocated/reserved memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned on a multiple of
		 * its size, so for them make the alignment the maximum
		 * size we could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		kvm_cma_declare_contiguous(selected_size, align_size);
	}
}
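/*
 * Worked example (assuming 4KB pages and the default 5% ratio): with
 * 16GB of memory there are 4194304 pages, so selected_size becomes
 * 4194304 * 5 / 100 = 209715 pages, i.e. roughly 819MiB reserved at
 * boot for HPT and RMA allocations.
 */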