/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

#include "book3s_hv_cma.h"

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER	18

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);
static void kvmppc_rmap_reset(struct kvm *kvm);

long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt = 0;
	struct revmap_entry *rev;
	struct page *page = NULL;
	long order = KVM_DEFAULT_HPT_ORDER;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	kvm->arch.hpt_cma_alloc = 0;
	/*
	 * try first to allocate it from the kernel page allocator.
	 * We keep the CMA reserved for failed allocation.
	 */
	hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT |
			       __GFP_NOWARN, order - PAGE_SHIFT);

	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
		VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
		page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
		if (page) {
			hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
			kvm->arch.hpt_cma_alloc = 1;
		} else
			--order;
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_cma_alloc)
		kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

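/*
 * Sizing note for kvmppc_alloc_hpt() above (illustrative numbers):
 * with order = 24, i.e. a 16 MB hash table, hpt_npte = 1ul << 20
 * (a million 16-byte HPTEs) and hpt_mask = (1ul << 17) - 1, since
 * each 128-byte HPTEG holds 8 HPTEs and the mask selects an HPTEG.
 */
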
long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_cma_alloc)
		kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
				1 << (kvm->arch.hpt_order - PAGE_SHIFT));
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

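/*
 * Illustration of the encodings above: a 4 kB page sets neither bit;
 * 64 kB and 16 MB pages both set HPTE_V_LARGE in the first dword, and
 * a 64 kB page additionally sets bit 0x1000 in the second dword so the
 * two large-page sizes can be told apart.
 */
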
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the memslot->arch.slot_phys[] array.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = memslot->arch.slot_phys;
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}

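/*
 * Note on kvmppc_virtmode_do_h_enter() above: when MMU notifiers are
 * not in use it first makes sure the backing page is resident and
 * pinned (kvmppc_get_guest_page()), then calls the same
 * kvmppc_do_h_enter() helper that the real-mode hcall path uses,
 * passing the host pgd so the helper can look up the Linux PTE
 * safely under rcu_read_lock_sched().
 */
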
/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			     long pte_index, unsigned long pteh,
			     unsigned long ptel)
{
	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
					  pteh, ptel, &vcpu->arch.gpr[4]);
}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	int i;
	u64 mask;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

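/*
 * Example for kvmppc_mmu_get_real_addr(): for a 64 kB page the page
 * size is 0x10000, so ra_mask is 0xffff and the result combines the
 * real page number from the second HPTE dword with the low 16 bits
 * of the effective address.
 */
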
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

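/*
 * How the test above works: for D-form loads and stores the
 * primary-opcode bit 0x10000000 distinguishes stores from loads
 * (e.g. stw is opcode 36, lwz is opcode 32), and for primary
 * opcode 31 (X-form) the corresponding bit in the extended opcode
 * is 0x100 (e.g. stwx vs. lwzx).
 */
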
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */
	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later. The
	 * translation could be invalidated in the meantime in which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible. It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */
	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	ret = -EFAULT;
	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			unsigned int hugepage_shift;
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, &hugepage_shift);
			if (ptep) {
				pte = kvmppc_read_update_linux_pte(ptep, 1,
							   hugepage_shift);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/*
	 * Set the HPTE to point to pfn.
	 * Since the pfn is at PAGE_SIZE granularity, make sure we
	 * don't mask out lower-order bits if psize < PAGE_SIZE.
	 */
	if (psize < PAGE_SIZE)
		psize = PAGE_SIZE;
	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	goto out_put;
}

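/*
 * Summary of kvmppc_book3s_hv_page_fault() above: the real-mode
 * handler has already located the HPTE for the faulting access, so
 * this code re-checks that the HPTE is unchanged, translates the
 * guest physical address, gets the backing host page with
 * get_user_pages_fast() (or looks up a VM_PFNMAP vma for I/O
 * mappings), builds an updated HPTE pointing at the host pfn, and
 * then re-locks the HPTE and the rmap chain to install it, backing
 * off and letting the guest retry if anything changed or an MMU
 * notifier invalidation raced with us.
 */
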
static void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm->memslots;
	kvm_for_each_memslot(memslot, slots) {
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			if (kvm->arch.using_mmu_notifiers)
				hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			if (rcbits & ~rev[i].guest_rpte) {
				rev[i].guest_rpte = ptel | rcbits;
				note_hpte_modification(kvm, &rev[i]);
			}
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
	return 0;
}

void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
				  struct kvm_memory_slot *memslot)
{
	unsigned long *rmapp;
	unsigned long gfn;
	unsigned long n;

	rmapp = memslot->arch.rmap;
	gfn = memslot->base_gfn;
	for (n = memslot->npages; n; --n) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, rmapp, gfn);
		++rmapp;
		++gfn;
	}
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}

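/*
 * Dirty logging: kvm_test_clear_dirty() above harvests the hardware
 * C (changed) bit from each HPTE on a page's rmap chain into the
 * KVMPPC_RMAP_CHANGED bit, and kvmppc_hv_get_dirty_log() below turns
 * that into bits in the caller-supplied dirty bitmap, together with
 * dirty state from the per-vcpu VPA and DTL areas.
 */
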
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long *map)
{
	unsigned long i;
	unsigned long *rmapp;
	struct kvm_vcpu *vcpu;

	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp) && map)
			__set_bit_le(i, map);
		++rmapp;
	}

	/* Harvest dirty bits from VPA and DTL updates */
	/* Note: we never modify the SLB shadow buffer areas */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
		harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	unsigned long pa;
	unsigned long *physp;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			goto err;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				goto err;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			goto err;
		page = pages[0];
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long *rmap;
	int srcu_idx;

	put_page(page);

	if (!dirty || !kvm->arch.using_mmu_notifiers)
		return;

	/* We need to mark this page dirty in the rmap chain */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot) {
		rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
		lock_rmap(rmap);
		*rmap |= KVMPPC_RMAP_CHANGED;
		unlock_rmap(rmap);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count is non-zero
 * in the header written to the stream, the kernel will make
 * sure that that many HPTEs are invalid, and invalidate them
 * if not.
 */

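/*
 * Stream format used by kvm_htab_read()/kvm_htab_write() below:
 * a sequence of struct kvm_get_htab_header records, each giving a
 * starting HPTE index plus counts of valid and invalid entries, and
 * each followed by n_valid HPTEs of 16 bytes (two doublewords) each.
 */
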
struct kvm_htab_ctx {
	unsigned long	index;
	unsigned long	flags;
	struct kvm	*kvm;
	int		first_pass;
};

#define HPTE_SIZE	(2 * sizeof(unsigned long))

/*
 * Returns 1 if this HPT entry has been modified or has pending
 * R/C bit changes.
 */
static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* Also need to consider changes in reference and changed bits */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
		return 1;

	return 0;
}

static long record_hpte(unsigned long flags, unsigned long *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r;
	unsigned long rcbits_unset;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = hpte_dirty(revp, hptp);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(hptp[0] & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		return 0;

	v = r = 0;
	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = hptp[0];

		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);

		/* Harvest R and C into guest view if necessary */
		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
		if (valid && (rcbits_unset & hptp[1])) {
			revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
				HPTE_GR_MODIFIED;
			dirty = 1;
		}

		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
			valid = 1;
		}
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;

		r = revp->guest_rpte;
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
		hptp[0] &= ~HPTE_V_HVLOCK;
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = v;
	hpte[1] = r;
	return ok;
}

static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
	revp = kvm->arch.revmap + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
			while (i < kvm->arch.hpt_npte &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
				++revp;
			}
		}
		hdr.index = i;

		/* Grab a series of valid entries */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvm->arch.hpt_npte) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}

static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	unsigned long *hptp;
	unsigned long tmp[2];
	unsigned long nb;
	long int err, ret;
	int rma_setup;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->lock);
	rma_setup = kvm->arch.rma_setup_done;
	if (rma_setup) {
		kvm->arch.rma_setup_done = 0;	/* temporarily */
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvm->arch.hpt_npte ||
		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
			break;

		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			err = -EFAULT;
			if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
				goto out;
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
			if (!rma_setup && is_vrma_hpte(v)) {
				unsigned long psize = hpte_page_size(v, r);
				unsigned long senc = slb_pgsize_encoding(psize);
				unsigned long lpcr;

				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				lpcr = senc << (LPCR_VRMASD_SH - 4);
				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
				rma_setup = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

 out:
	/* Order HPTE updates vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = rma_setup;
	mutex_unlock(&kvm->lock);

	if (err)
		return err;
	return nb;
}

static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}

static const struct file_operations kvm_htab_fops = {
	.read		= kvm_htab_read,
	.write		= kvm_htab_write,
	.llseek		= default_llseek,
	.release	= kvm_htab_release,
};

int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
	if (ret < 0) {
		kvm_put_kvm(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

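/*
 * Illustrative, untested userspace sketch of using the fd returned by
 * kvm_vm_ioctl_get_htab_fd() above to snapshot the guest HPT; the
 * ioctl number and structures come from the KVM uapi headers, and
 * save() is just a placeholder for whatever consumes the stream of
 * kvm_get_htab_header + HPTE records:
 *
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	int fd = ioctl(vmfd, KVM_PPC_GET_HTAB_FD, &ghf);
 *	char buf[65536];
 *	ssize_t n;
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		save(buf, n);
 */
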
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}