/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space,
	 * so don't worry about THP collapse/split.  Called
	 * only in real mode with MSR_EE = 0, hence won't need irq_save/restore.
	 */
	p = find_init_mm_pte(addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
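/*
 * Note: the real-mode hypervisor call handlers below run with the MMU
 * off, so they cannot use vmalloc'd addresses directly; that is why
 * real_vmalloc_addr() is applied to revmap and rmap pointers before
 * they are dereferenced in real mode.
 */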
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm)
{
	int global;
	int cpu;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
		/*
		 * On POWER9, threads are independent but the TLB is shared,
		 * so use the bit for the first thread to represent the core.
		 */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			cpu = cpu_first_thread_sibling(cpu);
		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
	}

	return global;
}
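/*
 * The value returned by global_invalidates() is passed as the 'global'
 * argument of do_tlbies() below: the global case uses tlbie under
 * kvm->arch.tlbie_lock, while the local case uses tlbiel and relies on
 * kvm->arch.need_tlb_flush having been set for the other cores above.
 */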
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.hpt.rev[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.hpt.rev[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
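/*
 * Rough sketch of the reverse-map structure manipulated here (see the
 * KVMPPC_RMAP_* definitions for the authoritative layout): each memslot
 * rmap word carries a PRESENT bit, cached R/C bits and the index of one
 * HPTE that maps the page, and the forw/back fields of the revmap
 * entries link all HPTEs for that page into a circular list.
 */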
/* Update the dirty bitmap of a memslot */
void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned long psize)
{
	unsigned long npages;

	if (!psize || !memslot->dirty_bitmap)
		return;
	npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
	gfn -= memslot->base_gfn;
	set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
}
EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map);
static void kvmppc_set_dirty_from_hpte(struct kvm *kvm,
				unsigned long hpte_v, unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long psize;

	psize = kvmppc_actual_pgsz(hpte_v, hpte_gr);
	gfn = hpte_rpn(hpte_gr, psize);
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslot && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, psize);
}
/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr,
				      struct kvm_memory_slot **memslotp,
				      unsigned long *gfnp)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslotp)
		*memslotp = memslot;
	if (gfnp)
		*gfnp = gfn;
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;
	struct kvm_memory_slot *memslot;
	unsigned long gfn;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_dirty_map(memslot, gfn,
					kvmppc_actual_pgsz(hpte_v, hpte_r));
	unlock_rmap(rmap);
}
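/*
 * H_ENTER: kvmppc_do_h_enter() below validates the guest-supplied HPTE,
 * translates the guest real address through the memslot and the host
 * Linux page tables, picks and locks a slot in the HPTE group, links the
 * entry into the reverse map and finally writes it into the HPT.
 */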
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	psize = kvmppc_actual_pgsz(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	/*
	 * If we had a page table change after lookup, we would
	 * retry via mmu_notifier_retry.
	 */
	if (!realmode)
		local_irq_save(irq_flags);
	/*
	 * If called in real mode we have MSR_EE = 0. Otherwise
	 * we disable irq above.
	 */
	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host
		 * page size when the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !__pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}

	/* If we had a host pte mapping then check WIMG */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.hpt.rev[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}
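/*
 * Sketch of the hcall path (the assembly entry points are not in this
 * file): the guest's H_ENTER arguments arrive in GPRs, the real-mode
 * wrapper above runs with vcpu->arch.pgdir as the host page-table root,
 * and the chosen PTE index is handed back to the guest via
 * vcpu->arch.gpr[4].
 */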
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	/* try to acquire the lock word with lwarx/stwcx., storing our token */
	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
			trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
				    kvm->arch.lpid, 0, 0, 0);
		}
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
			trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
				    0, 0, 0, 0);
		}
		asm volatile("ptesync" : : : "memory");
	}
}
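/*
 * Note on ordering in do_tlbies(): the ptesync before the loop orders
 * preceding HPTE updates ahead of the invalidations, and the trailing
 * eieio; tlbsync; ptesync (global case) or ptesync (local case) makes
 * the invalidations complete before the lock is dropped and the caller
 * continues.
 */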
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed.  This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);
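/*
 * The removed HPTE's first and second doublewords are returned through
 * hpret; the hcall wrapper below points hpret at vcpu->arch.gpr[4] so
 * that the guest sees them as the H_REMOVE return values.
 */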
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	global = global_invalidates(kvm);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {	/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				if (is_mmio_hpte(hp0, hp1))
					atomic64_inc(&kvm->arch.mmio_update);
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}
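/*
 * H_BULK_REMOVE processes up to four requests per pass of the outer
 * loop: matching HPTEs are invalidated with their RB values collected in
 * tlbrb[], a single do_tlbies() flushes the whole batch, and only then
 * are the final R/C bits read back and the entries unlocked.
 */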
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte_v, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = pte_v = be64_to_cpu(hpte[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte_v);
		return H_NOT_FOUND;
	}

	pte_r = be64_to_cpu(hpte[1]);
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		r = (pte_r & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte_r) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
			/* Don't lose R/C bit updates done by hardware */
			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	return H_SUCCESS;
}
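/*
 * The shifts above map the H_PROTECT flags argument onto the protection
 * fields of the second HPTE doubleword (PP0, KEY_HI, and the low-order
 * PP/N/KEY_LO bits); see the HPTE_R_* definitions for the authoritative
 * bit positions.
 */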
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			kvmppc_set_dirty_from_hpte(kvm, v, gr);
		}
	}
	vcpu->arch.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	u64 hp0, hp1;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;
	u64 hp0, hp1;

	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	12,	/* 4K */
	16,	/* 64k */
	20,	/* 1M, unsupported */
};
static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
		unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
	struct mmio_hpte_cache_entry *entry = NULL;
	unsigned int pshift;
	unsigned int i;

	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
		entry = &vcpu->arch.mmio_cache.entry[i];
		if (entry->mmio_update == mmio_update) {
			pshift = entry->slb_base_pshift;
			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
			    entry->slb_v == slb_v)
				return entry;
		}
	}
	return NULL;
}

static struct mmio_hpte_cache_entry *
			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.mmio_cache.index;

	vcpu->arch.mmio_cache.index++;
	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
		vcpu->arch.mmio_cache.index = 0;

	return &vcpu->arch.mmio_cache.entry[index];
}
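/*
 * This small cache lets kvmppc_hpte_hv_fault() resolve repeated faults
 * on emulated MMIO pages without searching the HPT again; entries are
 * implicitly invalidated by bumping kvm->arch.mmio_update whenever such
 * an HPTE is removed or changed.
 */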
/* When called from virtual mode, this function should be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK here can lead to
 * a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r, orig_v;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;
	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				v = hpte_new_to_old_v(v, r);
				r = hpte_new_to_old_r(r);
			}

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    kvmppc_hpte_base_page_shift(v, r) == pshift)
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], orig_v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
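/*
 * The search above tries the primary hash group first; if nothing
 * matches, it sets HPTE_V_SECONDARY and XORs the hash to try the
 * secondary group, returning -1 only after both groups have been
 * scanned.  On success the matching HPTE is left locked for the caller.
 */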
/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * not one we handle here (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}
		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;		/* send fault up to host kernel mode */
}