/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

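/*
 * Usage note (inferred from the callers below rather than stated here):
 * the real-mode hcall handlers run with translation off and so can only
 * dereference linear-map addresses.  The revmap array and the memslot
 * rmap arrays are typically vmalloc'd, hence accesses to them from real
 * mode go through real_vmalloc_addr() first.
 */
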
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * If we're not using MMU notifiers, we never take pages away
	 * from the guest, so we can use tlbiel if requested.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else if (kvm->arch.using_mmu_notifiers)
		global = 1;
	else
		global = !(flags & H_LOCAL);

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}

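/*
 * Rough summary of the trade-off above (the guest-entry side of this
 * lives outside this file): tlbiel only invalidates the local core's
 * TLB, so it is only safe here when any other physical core that might
 * hold stale translations for this lpid is flagged in need_tlb_flush
 * and can be made to flush before it next runs the guest.
 */
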
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

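/*
 * Rough picture of the reverse-map structure maintained above (the
 * KVMPPC_RMAP_* field positions are defined in asm/kvm_host.h, not
 * here): each guest real page has one rmap word holding a lock bit, a
 * PRESENT bit, accumulated R/C bits and the index of one HPTE mapping
 * the page; the revmap_entry forw/back indices then link all HPTEs that
 * map the page into a circular doubly-linked list.
 */
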
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
					 int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int hugepage_shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
	if (!ptep)
		return __pte(0);
	if (hugepage_shift)
		*pte_sizep = 1ul << hugepage_shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
}

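/*
 * Note on *pte_sizep above: the caller passes in the page size it wants
 * to map and gets back the size of the backing host page; returning
 * __pte(0) when the backing page is smaller stops a guest large page
 * from being mapped over several unrelated small host pages.
 */
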
static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

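/*
 * HPTE_V_HVLOCK is a software-use bit in the first HPTE doubleword,
 * used as a per-entry lock by try_lock_hpte() (see asm/kvm_book3s_64.h).
 * unlock_hpte() stores the final first doubleword behind a release
 * barrier so that any earlier updates to the entry are visible before
 * it appears unlocked.
 */
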
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
		pa |= gpa & ~PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = __gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
						  &pte_size);
		if (pte_present(pte) && !pte_numa(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}

	if (pte_size < psize)
		return H_PARAMETER;

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* If we had a host pte mapping then check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(*hpte);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(*hpte);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = cpu_to_be64(pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

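/*
 * HPT geometry assumed throughout this file: each HPTE is 16 bytes (two
 * 64-bit doublewords), hence the (pte_index << 4) addressing, and each
 * hash bucket (PTEG) holds 8 HPTEs, hence the groups of 8 probed above
 * when H_EXACT is not set and the (hash << 7) stride used further down.
 */
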
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

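/*
 * LOCK_TOKEN is the nonzero per-cpu value that the winner of
 * try_lock_tlbie() stores into kvm->arch.tlbie_lock.  The big-endian
 * build reuses the paca lock_token field; the little-endian fallback to
 * paca_index just needs some nonzero per-cpu value.
 */
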
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

/*
 * tlbie/tlbiel is a bit different on the PPC970 compared to later
 * processors such as POWER7; the large page bit is in the instruction
 * not RB, and the top 16 bits and the bottom 12 bits of the VA
 * in RB must be 0.
 */
static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
			  long npages, int global, bool need_sync)
{
	long i;

	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbie %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbie %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbiel %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbiel %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("ptesync" : : : "memory");
	}
}

static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970 tlbie instruction is a bit different */
		do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
		return;
	}
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE(%1,%0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
		asm volatile("ptesync" : : : "memory");
	}
}

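/*
 * Callers batch up to four RB values (see kvmppc_h_bulk_remove) and
 * invalidate them under one acquisition of tlbie_lock.  need_sync
 * selects whether a ptesync must precede the invalidations, i.e.
 * whether HPTE stores that cleared HPTE_V_VALID need to be ordered
 * before the tlbie/tlbiel.
 */
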
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		u64 pte1;

		pte1 = be64_to_cpu(hpte[1]);
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte1, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, pte1);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0;

	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
				be64_to_cpu(hp[1]), pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
		return H_NOT_FOUND;
	}

	v = pte;
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}
	r = (be64_to_cpu(hpte[1]) & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * If the host has this page as readonly but the guest
		 * wants to make it read/write, reduce the permissions.
		 * Checking the host permissions involves finding the
		 * memslot and then the Linux PTE for the page.
		 */
		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
			unsigned long psize, gfn, hva;
			struct kvm_memory_slot *memslot;
			pgd_t *pgdir = vcpu->arch.pgdir;
			pte_t pte;

			psize = hpte_page_size(v, r);
			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
			memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
			if (memslot) {
				hva = __gfn_to_hva_memslot(memslot, gfn);
				pte = lookup_linux_pte_and_update(pgdir, hva,
								  1, &psize);
				if (pte_present(pte) && !pte_write(pte))
					r = hpte_make_readonly(r);
			}
		}
	}
	hpte[1] = cpu_to_be64(r);
	eieio();
	hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

/*
 * When called from virtmode, this function should be protected by
 * preempt_disable(), otherwise the holding of HPTE_V_HVLOCK
 * can trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_base_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = cpu_to_be64(v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * should be reflected to the guest (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
	v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
	r = be64_to_cpu(hpte[1]);
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	     (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}