arch/powerpc/kvm/book3s_mmu_hpte.c
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"
#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;
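
/*
 * Each shadow PTE is kept on several hash lists at once so that it can
 * be found (and invalidated) by effective address, by effective page
 * group, by virtual page and by virtual page group.  Readers walk the
 * lists locklessly under RCU; writers serialize on vcpu3s->mmu_lock.
 */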
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}
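
/*
 * RCU callback: actually free an entry once a grace period has passed,
 * i.e. once no lockless reader can still hold a reference to it.
 */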
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}
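
/*
 * The flush helpers below walk one of the hash tables under
 * rcu_read_lock() and invalidate every matching entry.  A full flush
 * iterates the vPTE_long table, since every cached entry is linked
 * there.
 */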
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}
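
/*
 * Illustrative usage (hypothetical call site): dropping the shadow
 * mappings for the single 4k page containing eaddr would be
 *
 *	kvmppc_mmu_pte_flush(vcpu, eaddr, ~0xfffUL);
 *
 * while an ea_mask of 0 requests a complete flush.
 */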
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}
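
/*
 * The vp_mask values accepted above correspond to a 4k virtual page
 * (0xfffffffff), a 64k virtual page (0xffffffff0, Book3S-64 only) and
 * a virtual page group (0xffffff000).
 */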
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}
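
/*
 * Grab a free shadow PTE.  Note that eviction is all-or-nothing: once
 * the cache holds HPTEG_CACHE_NUM entries, the whole cache is flushed
 * before a new entry is allocated.
 */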
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}
void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}
static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}
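
/*
 * Module-scope setup/teardown: hpte_cache is a single slab cache
 * shared by all vcpus, created in sysinit and destroyed in sysexit
 * (presumably at module load/unload).
 */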
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}
void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}