arch/powerpc/kvm/book3s_mmu_hpte.c

/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace.h"

#define PTE_SIZE        12

static struct kmem_cache *hpte_cache;
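
/*
 * Each cached shadow PTE sits on four hash lists at once, so it can be
 * looked up (and invalidated) by guest effective address, by effective
 * address range, by guest virtual page and by virtual page range.
 * PTE_SIZE is the 4k page shift: eaddr >> PTE_SIZE is the page index
 * that gets fed into hash_64().
 */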
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
        return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
        return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
                       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
        return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
        return hash_64((vpage & 0xffffff000ULL) >> 12,
                       HPTEG_HASH_BITS_VPTE_LONG);
}
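
/*
 * Register a shadow PTE in all four hash lists. Insertions take
 * mmu_lock; lookups traverse the lists under rcu_read_lock(), hence
 * the RCU list primitives.
 */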
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        u64 index;
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

        trace_kvm_book3s_mmu_map(pte);

        spin_lock(&vcpu3s->mmu_lock);

        /* Add to ePTE list */
        index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
        hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

        /* Add to ePTE_long list */
        index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
        hlist_add_head_rcu(&pte->list_pte_long,
                           &vcpu3s->hpte_hash_pte_long[index]);

        /* Add to vPTE list */
        index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

        /* Add to vPTE_long list */
        index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
        hlist_add_head_rcu(&pte->list_vpte_long,
                           &vcpu3s->hpte_hash_vpte_long[index]);

        spin_unlock(&vcpu3s->mmu_lock);
}

static void free_pte_rcu(struct rcu_head *head)
{
        struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
        kmem_cache_free(hpte_cache, pte);
}
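
/*
 * Tear down one shadow PTE: unmap it from the shadow MMU, unhash it
 * from all four lists under mmu_lock, release the backing host page,
 * and defer the actual free to an RCU grace period so that concurrent
 * lockless list walkers never touch freed memory.
 */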
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

        trace_kvm_book3s_mmu_invalidate(pte);

        /* Different for 32 and 64 bit */
        kvmppc_mmu_invalidate_pte(vcpu, pte);

        spin_lock(&vcpu3s->mmu_lock);

        /* pte already invalidated in between? */
        if (hlist_unhashed(&pte->list_pte)) {
                spin_unlock(&vcpu3s->mmu_lock);
                return;
        }

        hlist_del_init_rcu(&pte->list_pte);
        hlist_del_init_rcu(&pte->list_pte_long);
        hlist_del_init_rcu(&pte->list_vpte);
        hlist_del_init_rcu(&pte->list_vpte_long);

        if (pte->pte.may_write)
                kvm_release_pfn_dirty(pte->pfn);
        else
                kvm_release_pfn_clean(pte->pfn);

        spin_unlock(&vcpu3s->mmu_lock);

        vcpu3s->hpte_cache_count--;
        call_rcu(&pte->rcu_head, free_pte_rcu);
}
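
/*
 * Every entry is linked into its vPTE_long list, so a complete flush
 * only needs to walk the vpte_long hash buckets.
 */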
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;
        struct hlist_node *node;
        int i;

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                        invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_pte)
                if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;

        /* Find the list of entries in the map */
        list = &vcpu3s->hpte_hash_pte_long[
                        kvmppc_mmu_hash_pte_long(guest_ea)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
                if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}
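
/*
 * Flush shadow PTEs whose effective address matches guest_ea under
 * ea_mask. Only three mask values are handled: a single 4k page
 * (~0xfff), the page bits within a 256MB segment (0x0ffff000), and
 * 0 for a complete flush; anything else triggers a WARN_ON.
 */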
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
        trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
        guest_ea &= ea_mask;

        switch (ea_mask) {
        case ~0xfffUL:
                kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
                break;
        case 0x0ffff000:
                kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
                break;
        case 0:
                /* Doing a complete flush -> start from scratch */
                kvmppc_mmu_pte_flush_all(vcpu);
                break;
        default:
                WARN_ON(1);
                break;
        }
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xfffffffffULL;

        list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_vpte)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_head *list;
        struct hlist_node *node;
        struct hpte_cache *pte;
        u64 vp_mask = 0xffffff000ULL;

        list = &vcpu3s->hpte_hash_vpte_long[
                        kvmppc_mmu_hash_vpte_long(guest_vp)];

        rcu_read_lock();

        /* Check the list for matching entries and invalidate */
        hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                if ((pte->pte.vpage & vp_mask) == guest_vp)
                        invalidate_pte(vcpu, pte);

        rcu_read_unlock();
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
        trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
        guest_vp &= vp_mask;

        switch (vp_mask) {
        case 0xfffffffffULL:
                kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
                break;
        case 0xffffff000ULL:
                kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
                break;
        default:
                WARN_ON(1);
                return;
        }
}
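
/*
 * Flush by guest physical address range. No hash is keyed on the real
 * address, so this walks every vPTE_long bucket and checks each
 * entry's raddr against [pa_start, pa_end).
 */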
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hlist_node *node;
        struct hpte_cache *pte;
        int i;

        trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

        rcu_read_lock();

        for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
                struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

                hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
                        if ((pte->pte.raddr >= pa_start) &&
                            (pte->pte.raddr < pa_end))
                                invalidate_pte(vcpu, pte);
        }

        rcu_read_unlock();
}
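
/*
 * Hand out a zeroed tracking entry for a new shadow PTE. Once the
 * per-vcpu count reaches HPTEG_CACHE_NUM, all cached entries are
 * flushed and the cache starts over from scratch.
 */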
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        struct hpte_cache *pte;

        pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
        vcpu3s->hpte_cache_count++;

        if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
                kvmppc_mmu_pte_flush_all(vcpu);

        return pte;
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
        int i;

        for (i = 0; i < len; i++)
                INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

        /* init hpte lookup hashes */
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
        kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
                                  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));

        spin_lock_init(&vcpu3s->mmu_lock);

        return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
        /* init hpte slab cache */
        hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
                                       sizeof(struct hpte_cache), 0, NULL);

        return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
        kmem_cache_destroy(hpte_cache);
}