/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}
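
/*
 * Note: the hypercall handlers below can run in real mode (MMU off),
 * where vmalloc'd addresses cannot be dereferenced directly.  Hence
 * structures such as the revmap array are accessed through
 * real_vmalloc_addr(), which resolves them to their linear-mapping
 * alias by walking the kernel page tables.
 */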

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
        int global;

        /*
         * If there is only one vcore, and it's currently running,
         * we can use tlbiel as long as we mark all other physical
         * cores as potentially having stale TLB entries for this lpid.
         * If we're not using MMU notifiers, we never take pages away
         * from the guest, so we can use tlbiel if requested.
         * Otherwise, don't use tlbiel.
         */
        if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore)
                global = 0;
        else if (kvm->arch.using_mmu_notifiers)
                global = 1;
        else
                global = !(flags & H_LOCAL);

        if (!global) {
                /* any other core might now have stale TLB entries... */
                smp_wmb();
                cpumask_setall(&kvm->arch.need_tlb_flush);
                cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
                                  &kvm->arch.need_tlb_flush);
        }

        return global;
}
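
/*
 * Background: tlbiel invalidates translations only on the executing
 * core, while tlbie broadcasts the invalidation to every processor.
 * The need_tlb_flush mask set above is what makes the local-only path
 * safe: any other core that later runs this guest is expected to flush
 * its TLB for this LPID before entering the guest.
 */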

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                             unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.revmap[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.revmap[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
                        pte_index | KVMPPC_RMAP_PRESENT;
        }
        unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
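
/*
 * The reverse map for a guest page is, in effect, a circular doubly
 * linked list of HPTE indexes threaded through the revmap array: the
 * rmap word holds the index of the head entry, and each revmap_entry's
 * forw/back fields point to the next and previous HPTEs mapping the
 * same page.  The code above inserts at the tail; remove_revmap_chain()
 * below unlinks an entry and updates the head as needed.
 */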

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                struct revmap_entry *rev,
                                unsigned long hpte_v, unsigned long hpte_r)
{
        struct revmap_entry *next, *prev;
        unsigned long gfn, ptel, head;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        unsigned long rcbits;

        rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
        ptel = rev->guest_rpte |= rcbits;
        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!memslot)
                return;

        rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
        unlock_rmap(rmap);
}

static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
                                         int writing, unsigned long *pte_sizep)
{
        pte_t *ptep;
        unsigned long ps = *pte_sizep;
        unsigned int hugepage_shift;

        ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
        if (!ptep)
                return __pte(0);
        if (hugepage_shift)
                *pte_sizep = 1ul << hugepage_shift;
        else
                *pte_sizep = PAGE_SIZE;
        if (ps > *pte_sizep)
                return __pte(0);
        return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
}

static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = hpte_v;
}
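
/*
 * Locking convention: HPTE_V_HVLOCK is a software lock bit in the first
 * doubleword of each HPTE.  try_lock_hpte() (defined in a header)
 * acquires it with a load-and-reserve/store-conditional sequence
 * followed by an acquire barrier; unlock_hpte() above pairs with that
 * by issuing a release barrier before the plain store that rewrites
 * hpte[0] with the lock bit clear.
 */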

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                       long pte_index, unsigned long pteh, unsigned long ptel,
                       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
        unsigned long is_io;
        unsigned long *rmap;
        pte_t pte;
        unsigned int writing;
        unsigned long mmu_seq;
        unsigned long rcbits;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        writing = hpte_is_writable(ptel);
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
        ptel &= ~HPTE_GR_RESERVED;
        g_ptel = ptel;

        /* used later to detect if we might have been invalidated */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        pa = 0;
        is_io = ~0ul;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* PPC970 can't do emulated MMIO */
                if (!cpu_has_feature(CPU_FTR_ARCH_206))
                        return H_PARAMETER;
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->arch.rmap[slot_fn];

        if (!kvm->arch.using_mmu_notifiers) {
                physp = memslot->arch.slot_phys;
                if (!physp)
                        return H_PARAMETER;
                physp += slot_fn;
                if (realmode)
                        physp = real_vmalloc_addr(physp);
                pa = *physp;
                if (!pa)
                        return H_TOO_HARD;
                is_io = pa & (HPTE_R_I | HPTE_R_W);
                pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
                pa &= PAGE_MASK;
                pa |= gpa & ~PAGE_MASK;
        } else {
                /* Translate to host virtual address */
                hva = __gfn_to_hva_memslot(memslot, gfn);

                /* Look up the Linux PTE for the backing page */
                pte_size = psize;
                pte = lookup_linux_pte_and_update(pgdir, hva, writing,
                                                  &pte_size);
                if (pte_present(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_io = hpte_cache_bits(pte_val(pte));
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                        pa |= hva & (pte_size - 1);
                        pa |= gpa & ~PAGE_MASK;
                }
        }

        if (pte_size < psize)
                return H_PARAMETER;

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;

        if (pa)
                pteh |= HPTE_V_VALID;
        else
                pteh |= HPTE_V_ABSENT;

        /* Check WIMG */
        if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W | HPTE_R_I | HPTE_R_G);
                ptel |= HPTE_R_M;
        }

        /* Find and lock the HPTEG slot to use */
 do_insert:
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                *hpte &= ~HPTE_V_HVLOCK;
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                *hpte &= ~HPTE_V_HVLOCK;
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.revmap[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev) {
                rev->guest_rpte = g_ptel;
                note_hpte_modification(kvm, rev);
        }

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (kvm->arch.using_mmu_notifiers &&
                    mmu_notifier_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
                                                realmode);
                        /* Only set R/C in real HPTE if already set in *rmap */
                        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
                        ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
                }
        }
        hpte[1] = ptel;

        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");

        *pte_idx_ret = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
                                 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}
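
/*
 * Calling convention note: under the PAPR convention, H_ENTER takes
 * flags in r4, the PTE group index in r5 and the two HPTE doublewords
 * in r6/r7, and on success returns the index of the slot actually used
 * in r4.  That is why kvmppc_h_enter() passes &vcpu->arch.gpr[4] as the
 * pte_idx_ret pointer: kvmppc_do_h_enter() stores the chosen index
 * straight into the guest's return register.
 */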

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif

static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}
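
/*
 * try_lock_tlbie() is a non-blocking spinlock acquire built directly on
 * the lwarx/stwcx. primitives: lwarx loads the lock word with a
 * reservation, and stwcx. stores the owner token only if the
 * reservation still holds.  The isync prevents later accesses from
 * being speculated ahead of the lock acquisition.  Returning old == 0
 * reports whether the lock was free (and hence is now ours).
 */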

/*
 * tlbie/tlbiel is a bit different on the PPC970 compared to later
 * processors such as POWER7; the large page bit is in the instruction
 * not RB, and the top 16 bits and the bottom 12 bits of the VA
 * in RB must be 0.
 */
static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
                          long npages, int global, bool need_sync)
{
        long i;

        if (global) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i) {
                        unsigned long rb = rbvalues[i];

                        if (rb & 1)             /* large page */
                                asm volatile("tlbie %0,1" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                        else
                                asm volatile("tlbie %0,0" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                }
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i) {
                        unsigned long rb = rbvalues[i];

                        if (rb & 1)             /* large page */
                                asm volatile("tlbiel %0,1" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                        else
                                asm volatile("tlbiel %0,0" : :
                                             "r" (rb & 0x0000fffffffff000ul));
                }
                asm volatile("ptesync" : : : "memory");
        }
}

static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                      long npages, int global, bool need_sync)
{
        long i;

        if (cpu_has_feature(CPU_FTR_ARCH_201)) {
                /* PPC970 tlbie instruction is a bit different */
                do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
                return;
        }
        if (global) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i)
                        asm volatile(PPC_TLBIE(%1,%0) : :
                                     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i)
                        asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
                asm volatile("ptesync" : : : "memory");
        }
}
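
/*
 * The global path takes kvm->arch.tlbie_lock because, as I understand
 * it, these processors require tlbie sequences to be serialised (only
 * one may be in progress at a time from a given partition).  The
 * closing "eieio; tlbsync; ptesync" sequence waits until all processors
 * have completed the invalidation before the PTE update is considered
 * globally visible.
 */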

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret)
{
        unsigned long *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        v = hpte[0] & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~HPTE_V_VALID;
                rb = compute_tlbie_rb(v, hpte[1], pte_index);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /* Read PTE low word after tlbie to get final R/C values */
                remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
        }
        r = rev->guest_rpte & ~HPTE_GR_RESERVED;
        note_hpte_modification(kvm, rev);
        unlock_hpte(hpte, 0);

        hpret[0] = v;
        hpret[1] = r;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn)
{
        return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
                                  &vcpu->arch.gpr[4]);
}

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, *hptes[4], tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        int global;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];

        global = global_invalidates(kvm, 0);
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
                for (; i < 4; ++i) {
                        j = i * 2;
                        pte_index = args[j];
                        flags = pte_index >> 56;
                        pte_index &= ((1ul << 56) - 1);
                        req = flags >> 6;
                        flags &= 3;
                        if (req == 3) {         /* no more requests */
                                i = 4;
                                break;
                        }
                        if (req != 1 || flags == 3 ||
                            pte_index >= kvm->arch.hpt_npte) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
                        hp = (unsigned long *)
                                (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        break;
                                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                                        cpu_relax();
                        }
                        found = 0;
                        if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
                                        if (!(hp[0] & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
                                        if ((hp[0] & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
                                hp[0] &= ~HPTE_V_HVLOCK;
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }

                        args[j] = ((0x80 | flags) << 56) + pte_index;
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
                        note_hpte_modification(kvm, rev);

                        if (!(hp[0] & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
                                hp[0] = 0;
                                continue;
                        }

                        hp[0] &= ~HPTE_V_VALID;         /* leave it locked */
                        tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        ++n;
                }

                if (!n)
                        break;

                /* Now that we've collected a batch, do the tlbies */
                do_tlbies(kvm, tlbrb, n, global, true);

                /* Read PTE low words after tlbie to get final R/C values */
                for (k = 0; k < n; ++k) {
                        j = indexes[k];
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
                        remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
                        hp[0] = 0;
                }
        }

        return ret;
}
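
/*
 * H_BULK_REMOVE encoding, as handled above: the guest passes up to four
 * two-doubleword "translation specifiers" in r4-r11.  The high byte of
 * the first doubleword holds the request type (bits 7:6, where 3 means
 * end of list) and the match flags (bits 1:0: absolute, andcond or
 * AVPN); the low 56 bits hold the PTE index.  The handler writes a
 * response code back into that byte: 0x80 | flags for success (with the
 * R/C bits inserted at bit 56 - 5), 0x90 | flags for "not found", and
 * 0xa0 | flags for a parameter error.
 */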

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;

        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        v = hpte[0];
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
                note_hpte_modification(kvm, rev);
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                rb = compute_tlbie_rb(v, r, pte_index);
                hpte[0] = v & ~HPTE_V_VALID;
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /*
                 * If the host has this page as readonly but the guest
                 * wants to make it read/write, reduce the permissions.
                 * Checking the host permissions involves finding the
                 * memslot and then the Linux PTE for the page.
                 */
                if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
                        unsigned long psize, gfn, hva;
                        struct kvm_memory_slot *memslot;
                        pgd_t *pgdir = vcpu->arch.pgdir;
                        pte_t pte;

                        psize = hpte_page_size(v, r);
                        gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
                        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
                        if (memslot) {
                                hva = __gfn_to_hva_memslot(memslot, gfn);
                                pte = lookup_linux_pte_and_update(pgdir, hva,
                                                                  1, &psize);
                                if (pte_present(pte) && !pte_write(pte))
                                        r = hpte_make_readonly(r);
                        }
                }
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                v = hpte[0] & ~HPTE_V_HVLOCK;
                r = hpte[1];
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID) {
                        r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
                        r &= ~HPTE_GR_RESERVED;
                }
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
                        unsigned long pte_index)
{
        unsigned long rb;

        hptep[0] &= ~HPTE_V_VALID;
        rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
        do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
                           unsigned long pte_index)
{
        unsigned long rb;
        unsigned char rbyte;

        rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
        rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
        do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};
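
/*
 * The index into slb_base_page_shift[] comes from the SLB_VSID_LP
 * (large-page selector) field of the SLB entry, as extracted in
 * kvmppc_hv_find_lock_hpte() below; the table maps that 2-bit selector
 * to the base page shift for the segment when SLB_VSID_L is set.
 */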

/* When called from virtual mode, this function should be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK while preempted
 * can trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        unsigned long *hpte;
        unsigned long mask, val;
        unsigned long v, r;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

        for (;;) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = hpte[i] & ~HPTE_V_HVLOCK;

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = hpte[i] & ~HPTE_V_HVLOCK;
                        r = hpte[i+1];

                        /*
                         * Check the HPTE again, including large page size.
                         * Since we don't currently allow any MPSS (mixed
                         * page-size segment) page sizes, it is sufficient
                         * to check against the actual page size.
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        /* Unlock and move on */
                        hpte[i] = v;
                }

                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ kvm->arch.hpt_mask;
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
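
/*
 * Search order note: each hash bucket (HPTE group) holds 8 entries of
 * 16 bytes, hence the (hash << 7) offset and the 16-doubleword scan
 * above.  If no match is found in the primary bucket, the loop retries
 * with HPTE_V_SECONDARY set and the hash complemented (hash ^
 * hpt_mask), which is how the hashed page table's secondary hash is
 * derived.  On success the entry is returned still holding
 * HPTE_V_HVLOCK, so callers must unlock it when done.
 */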

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * should be passed through to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
        unsigned long *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;

        /* For protection fault, expect to find a valid HPTE */
        valid = HPTE_V_VALID;
        if (status & DSISR_NOHPTE)
                valid |= HPTE_V_ABSENT;

        index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
        if (index < 0) {
                if (status & DSISR_NOHPTE)
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hpte[0] & ~HPTE_V_HVLOCK;
        r = hpte[1];
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;

        unlock_hpte(hpte, v);

        /* For not found, if the HPTE is valid by now, retry the instruction */
        if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        status &= ~DSISR_NOHPTE;        /* DSISR_NOHPTE == SRR1_ISI_NOPT */
        if (!data) {
                if (gr & (HPTE_R_N | HPTE_R_G))
                        return status | SRR1_ISI_N_OR_G;
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | SRR1_ISI_PROT;
        } else if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        }

        /* Check storage key, if applicable */
        if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return status | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;

        /* Check the storage key to see if it is possibly emulated MMIO */
        if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
            (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
            (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
                return -2;      /* MMIO emulation - load instr word */

        return -1;              /* send fault up to host kernel mode */
}