/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"	/* trace_kvm_book3s_mmu_* tracepoints used below */

#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;

static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

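/*
 * Each cached shadow PTE is linked into several hash tables at once:
 * by effective address (pte), by a wider effective-address range
 * (pte_long), by virtual page (vpte), by a wider vpage range
 * (vpte_long) and, on Book3S-64, by 64k vpage (vpte_64k).  The hash
 * functions above pick the bucket in the respective table, so a flush
 * keyed on any of these granularities only has to walk one bucket.
 */
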
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

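/*
 * Writers add and remove entries under vcpu3s->mmu_lock using the _rcu
 * list primitives, while the flush paths below walk the buckets under
 * rcu_read_lock().  Freeing is deferred through call_rcu() so that a
 * concurrent reader never dereferences a freed hpte_cache entry.
 */
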
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}

static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);

	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

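/*
 * ea_mask selects the flush granularity above: ~0xfff flushes a single
 * effective page, 0x0ffff000 flushes the wider range tracked by the
 * pte_long lists, and 0 drops every cached entry.
 */
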
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);

	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

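/*
 * Likewise, vp_mask picks the virtual-page flush granularity; the three
 * masks correspond to the vpte, vpte_64k and vpte_long hash lists, so
 * each flush only scans a single bucket of the matching table.
 */
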
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	/* No hash keyed on the real address exists, so scan every bucket */
	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

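/*
 * The allocation path below keeps eviction trivial: once hpte_cache_count
 * reaches HPTEG_CACHE_NUM, the whole cache is flushed and repopulated on
 * demand instead of evicting individual entries.
 */
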
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}