arch/powerpc/kvm/book3s_mmu_hpte.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"

#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;

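/*
 * Shadow PTEs are hashed into several lists at once so they can be
 * looked up by effective address, by effective page group, and by
 * virtual page number. The helpers below compute the bucket index
 * for each of those hash tables.
 */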
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

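/*
 * Add a shadow PTE to every hash list. Readers walk these lists under
 * RCU, so insertion only needs mmu_lock to serialize against other
 * updaters.
 */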
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

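/*
 * Remove a shadow PTE: invalidate the hardware mapping, unhash the
 * entry from all lists under mmu_lock, and free it only after an RCU
 * grace period so concurrent lockless list walkers stay safe.
 */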
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}

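/*
 * Every entry is hashed into the vPTE_long table, so walking all of
 * its buckets visits each cached PTE exactly once.
 */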
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

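/*
 * Dispatch an effective-address flush based on which mask the caller
 * passed: a single page, a partial-address (long) match, or a full
 * flush of the cache. Unexpected masks trigger a WARN_ON().
 */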
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

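/*
 * Dispatch a virtual-page flush by mask granularity; unknown masks
 * trigger a WARN_ON() instead of flushing anything.
 */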
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

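/*
 * Flush all shadow PTEs that map a guest real address in the range
 * [pa_start, pa_end).
 */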
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

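/*
 * Allocate a zeroed cache entry, flushing the whole cache first if it
 * has already reached HPTEG_CACHE_NUM entries.
 */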
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

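/*
 * Module-level setup and teardown for the slab cache backing all
 * shadow PTE allocations.
 */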
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}