/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>
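
/*
 * Trap-and-emulate KVM/MIPS runs the guest under host ASIDs that are
 * allocated separately for guest kernel mode and guest user mode, and
 * tracked per host CPU. The two helpers below return the hardware ASID
 * bits for the current CPU, masked to the ASID width this CPU supports.
 */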
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}
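
/*
 * The gfn -> pfn table for the guest is populated lazily: a guest page
 * is only pinned (via gfn_to_pfn()) the first time the guest touches
 * it. The memslot lookup inside gfn_to_pfn() is protected by taking
 * the kvm->srcu read lock.
 */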
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	/* page has already been mapped */
	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
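
/*
 * A MIPS TLB entry maps an aligned pair of virtual pages through
 * EntryLo0/EntryLo1, so the KSEG0 fault handler below always maps the
 * even/odd gfn pair surrounding the faulting address with a single
 * host TLB write; the (gfn | 1) range check covers both pages at once.
 */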
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
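
/*
 * Shadow a guest TLB entry into the host TLB: translate both mapped
 * gfns to host pfns, then build host EntryLo values that keep the
 * guest's dirty (D) and valid (V) bits but substitute the host's
 * physical frames and default cache attributes.
 */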
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	long tlb_lo[2];
	int ret;

	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the
	 * guest TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
	      VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
	if (gfn0 >= kvm->arch.guest_pmap_npages ||
	    gfn1 >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
			__func__, gfn0, gfn1, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}

	if (kvm_mips_map_page(kvm, gfn0) < 0)
		return -1;

	if (kvm_mips_map_page(kvm, gfn1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn0];
	pfn1 = kvm->arch.guest_pmap[gfn1];

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[0] & ENTRYLO_D) |
		(tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[1] & ENTRYLO_D) |
		(tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
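
/*
 * ASID allocation follows the usual MIPS scheme: the upper bits of
 * asid_cache(cpu) hold a generation ("version") number. When the low
 * ASID bits wrap around, the TLB is flushed and a new generation
 * begins, which implicitly invalidates every previously handed-out
 * ASID.
 */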
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
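
/*
 * Two cases to note in vcpu_load below: if the ASID generation moved
 * on while this VCPU was scheduled out, fresh kernel/user ASIDs are
 * allocated and, if we were preempted in guest context (PF_VCPU),
 * EntryHi must be rewritten to match the guest's current mode;
 * otherwise the EntryHi saved at preemption is simply restored.
 */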
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	    asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
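
/*
 * Fetch the instruction the guest faulted on. The guest PC must first
 * be made accessible to the host: mapped-segment addresses go through
 * the host TLB (refilling it from the guest TLB if necessary), while
 * KSEG0 addresses are translated directly to a host physical address
 * and read via kmap_atomic().
 */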
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index])) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}