// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M   _PAGE_COHERENT
#endif

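/*
 * Split real mode: the guest runs with MSR_DR set but MSR_IR clear, so
 * instruction fetches use real addresses while data accesses are
 * translated.  The helpers below detect that state and offset the guest
 * PC/LR into the SPLIT_HACK_OFFS alias so PR KVM can still run the code
 * with instruction translation enabled.
 */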
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);

		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}

static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	kvmppc_unfixup_split_real(vcpu);

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = to_book3s(vcpu)->hior + vec;

#ifdef CONFIG_PPC_BOOK3S_64
	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;
#endif

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	kvmppc_set_msr(vcpu, new_msr);
}

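/*
 * vcpu_load/vcpu_put: keep the per-CPU shadow vcpu's SLB in sync with the
 * guest's shadow copy and turn AIL off while a PR guest is resident, so
 * that exceptions are taken through the real-mode handlers KVM interposes.
 */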
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);

	kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use)
		kvmppc_copy_from_svcpu(vcpu);

	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_save_tm_pr(vcpu);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
	svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
	svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
	svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
	svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
	svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
	svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
	svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
	svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
	svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
	svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
	svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
	svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
	svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
	svcpu->cr  = vcpu->arch.regs.ccr;
	svcpu->xer = vcpu->arch.regs.xer;
	svcpu->ctr = vcpu->arch.regs.ctr;
	svcpu->lr  = vcpu->arch.regs.link;
	svcpu->pc  = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;

	svcpu_put(svcpu);
}

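/*
 * Recompute the MSR the guest actually runs with in problem state: keep
 * only the guest MSR bits that are safe to pass straight through, force
 * the bits the host needs (IR/DR/PR/EE/ME/RI), and pass through whatever
 * math extensions the guest currently owns.
 */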
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
		MSR_TM | MSR_TS_MASK;
#else
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * in guest privileged state, we want to fail all TM transactions.
	 * So disable MSR TM bit so that all tbegin. will be able to be
	 * trapped in host.
	 */
	if (!(guest_msr & MSR_PR))
		smsr &= ~MSR_TM;
#endif
	vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	ulong old_msr;
#endif

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
	vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
	vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
	vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
	vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
	vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
	vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
	vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
	vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
	vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
	vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
	vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
	vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
	vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
	vcpu->arch.regs.ccr  = svcpu->cr;
	vcpu->arch.regs.xer = svcpu->xer;
	vcpu->arch.regs.ctr = svcpu->ctr;
	vcpu->arch.regs.link  = svcpu->lr;
	vcpu->arch.regs.nip  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Unlike other MSR bits, MSR[TS] bits can be changed at guest without
	 * notifying host:
	 *  modified by unprivileged instructions like "tbegin"/"tend"/
	 * "tresume"/"tsuspend" in PR KVM guest.
	 *
	 * It is necessary to sync here to calculate a correct shadow_msr.
	 *
	 * privileged guest's tbegin will be failed at present. So we
	 * only take care of problem state guest.
	 */
	old_msr = kvmppc_get_msr(vcpu);
	if (unlikely((old_msr & MSR_PR) &&
		     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
		     (old_msr & (MSR_TS_MASK)))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}
#endif

	svcpu->in_use = false;

out:
	svcpu_put(svcpu);
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
	tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
	tm_enable();
	mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
	mtspr(SPRN_TEXASR, vcpu->arch.texasr);
	mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	tm_disable();
}

/* loadup math bits which is enabled at kvmppc_get_msr() but not enabled at
 * hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
	ulong exit_nr;
	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
		(MSR_FP | MSR_VEC | MSR_VSX);

	if (!ext_diff)
		return;

	if (ext_diff == MSR_FP)
		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
	else if (ext_diff == MSR_VEC)
		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
	else
		exit_nr = BOOK3S_INTERRUPT_VSX;

	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
		kvmppc_save_tm_sprs(vcpu);
		return;
	}

	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	preempt_disable();
	_kvmppc_save_tm_pr(vcpu, mfmsr());
	preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
		kvmppc_restore_tm_sprs(vcpu);
		if (kvmppc_get_msr(vcpu) & MSR_TM) {
			kvmppc_handle_lost_math_exts(vcpu);
			if (vcpu->arch.fscr & FSCR_TAR)
				kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		}
		return;
	}

	preempt_disable();
	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
	preempt_enable();

	if (kvmppc_get_msr(vcpu) & MSR_TM) {
		kvmppc_handle_lost_math_exts(vcpu);
		if (vcpu->arch.fscr & FSCR_TAR)
			kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
	}
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

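/*
 * Handle a guest MSR update: mask it with the emulated CPU's MSR mask,
 * recompute the shadow MSR, honour MSR_POW by blocking the vcpu, and
 * remap segments / preload the magic page whenever the translation or
 * privilege bits change.
 */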
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr;

	/* For PAPR guest, make sure MSR reflects guest mode */
	if (vcpu->arch.papr_enabled)
		msr = (msr & ~MSR_HV) | MSR_ME;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* We should never target guest MSR to TS=10 && PR=0,
	 * since we always fail transaction for guest privilege
	 * state.
	 */
	if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
		kvmppc_emulate_tabort(vcpu,
			TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

	old_msr = kvmppc_get_msr(vcpu);
	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (kvmppc_get_msr(vcpu) & MSR_TM)
		kvmppc_handle_lost_math_exts(vcpu);
#endif
}

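/*
 * Select the emulated CPU model from the requested PVR: choose the 32-bit
 * or 64-bit Book3S MMU, set HIOR and MSR-mask defaults, and derive the
 * hflags (dcbz32, multiple page sizes, native paired singles) that depend
 * on both the guest and the host CPU.
 */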
static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
	case PVR_POWER9:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr	%0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 bytes dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor ressource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

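/*
 * Handle a guest instruction or data storage interrupt: translate the
 * effective address through the guest MMU (or fake a real-mode mapping),
 * then either reflect the fault back into the guest, map the page on the
 * host, or fall back to MMIO emulation if the address has no memslot.
 */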
static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
		pte.wimg = HPTE_R_M;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		fallthrough;
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries, or protection fault */
		u64 flags;

		if (page_found == -EPERM)
			flags = DSISR_PROTFAULT;
		else
			flags = DSISR_NOHPTE;
		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			r = RESUME_HOST;
			goto out;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

out:
	return r;
}

820 void kvmppc_giveup_ext(struct kvm_vcpu
*vcpu
, ulong msr
)
822 struct thread_struct
*t
= ¤t
->thread
;
825 * VSX instructions can access FP and vector registers, so if
826 * we are giving up VSX, make sure we give up FP and VMX as well.
829 msr
|= MSR_FP
| MSR_VEC
;
831 msr
&= vcpu
->arch
.guest_owned_ext
;
836 printk(KERN_INFO
"Giving up ext 0x%lx\n", msr
);
841 * Note that on CPUs with VSX, giveup_fpu stores
842 * both the traditional FP registers and the added VSX
843 * registers into thread.fp_state.fpr[].
845 if (t
->regs
->msr
& MSR_FP
)
847 t
->fp_save_area
= NULL
;
850 #ifdef CONFIG_ALTIVEC
852 if (current
->thread
.regs
->msr
& MSR_VEC
)
853 giveup_altivec(current
);
854 t
->vr_save_area
= NULL
;
858 vcpu
->arch
.guest_owned_ext
&= ~(msr
| MSR_VSX
);
859 kvmppc_recalc_shadow_msr(vcpu
);
/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request*/
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
		if (!cpu_has_feature(CPU_FTR_VSX)) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Since we disabled MSR_TM at privilege state, the mfspr instruction
	 * for TM spr can trigger TM fac unavailable. In this case, the
	 * emulation is handled by kvmppc_emulate_fac(), which invokes
	 * kvmppc_emulate_mfspr() finally. But note the mfspr can include
	 * RT for NV registers. So it need to restore those NV reg to reflect
	 * the update.
	 */
	if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
		return RESUME_GUEST_NV;
#endif

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	} else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
		vcpu->arch.fscr = fscr;
		kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
		return;
	}

	vcpu->arch.fscr = fscr;
}
#endif /* CONFIG_PPC_BOOK3S_64 */

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

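/*
 * Handle a program interrupt taken while the guest was running: depending
 * on the SRR1 flags and the guest privilege level, either reflect it into
 * the guest or emulate the offending instruction on the host.
 */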
static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

*vcpu
, unsigned int exit_nr
)
1156 struct kvm_run
*run
= vcpu
->run
;
1157 int r
= RESUME_HOST
;
1160 vcpu
->stat
.sum_exits
++;
1162 run
->exit_reason
= KVM_EXIT_UNKNOWN
;
1163 run
->ready_for_interrupt_injection
= 1;
1165 /* We get here with MSR.EE=1 */
1167 trace_kvm_exit(exit_nr
, vcpu
);
1171 case BOOK3S_INTERRUPT_INST_STORAGE
:
1173 ulong shadow_srr1
= vcpu
->arch
.shadow_srr1
;
1174 vcpu
->stat
.pf_instruc
++;
1176 if (kvmppc_is_split_real(vcpu
))
1177 kvmppc_fixup_split_real(vcpu
);
1179 #ifdef CONFIG_PPC_BOOK3S_32
1180 /* We set segments as unused segments when invalidating them. So
1181 * treat the respective fault as segment fault. */
1183 struct kvmppc_book3s_shadow_vcpu
*svcpu
;
1186 svcpu
= svcpu_get(vcpu
);
1187 sr
= svcpu
->sr
[kvmppc_get_pc(vcpu
) >> SID_SHIFT
];
1189 if (sr
== SR_INVALID
) {
1190 kvmppc_mmu_map_segment(vcpu
, kvmppc_get_pc(vcpu
));
1197 /* only care about PTEG not found errors, but leave NX alone */
1198 if (shadow_srr1
& 0x40000000) {
1199 int idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
1200 r
= kvmppc_handle_pagefault(vcpu
, kvmppc_get_pc(vcpu
), exit_nr
);
1201 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
1202 vcpu
->stat
.sp_instruc
++;
1203 } else if (vcpu
->arch
.mmu
.is_dcbz32(vcpu
) &&
1204 (!(vcpu
->arch
.hflags
& BOOK3S_HFLAG_DCBZ32
))) {
1206 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
1207 * so we can't use the NX bit inside the guest. Let's cross our fingers,
1208 * that no guest that needs the dcbz hack does NX.
1210 kvmppc_mmu_pte_flush(vcpu
, kvmppc_get_pc(vcpu
), ~0xFFFUL
);
1213 kvmppc_core_queue_inst_storage(vcpu
,
1214 shadow_srr1
& 0x58000000);
1219 case BOOK3S_INTERRUPT_DATA_STORAGE
:
1221 ulong dar
= kvmppc_get_fault_dar(vcpu
);
1222 u32 fault_dsisr
= vcpu
->arch
.fault_dsisr
;
1223 vcpu
->stat
.pf_storage
++;
1225 #ifdef CONFIG_PPC_BOOK3S_32
1226 /* We set segments as unused segments when invalidating them. So
1227 * treat the respective fault as segment fault. */
1229 struct kvmppc_book3s_shadow_vcpu
*svcpu
;
1232 svcpu
= svcpu_get(vcpu
);
1233 sr
= svcpu
->sr
[dar
>> SID_SHIFT
];
1235 if (sr
== SR_INVALID
) {
1236 kvmppc_mmu_map_segment(vcpu
, dar
);
1244 * We need to handle missing shadow PTEs, and
1245 * protection faults due to us mapping a page read-only
1246 * when the guest thinks it is writable.
1248 if (fault_dsisr
& (DSISR_NOHPTE
| DSISR_PROTFAULT
)) {
1249 int idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
1250 r
= kvmppc_handle_pagefault(vcpu
, dar
, exit_nr
);
1251 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
1253 kvmppc_core_queue_data_storage(vcpu
, dar
, fault_dsisr
);
1258 case BOOK3S_INTERRUPT_DATA_SEGMENT
:
1259 if (kvmppc_mmu_map_segment(vcpu
, kvmppc_get_fault_dar(vcpu
)) < 0) {
1260 kvmppc_set_dar(vcpu
, kvmppc_get_fault_dar(vcpu
));
1261 kvmppc_book3s_queue_irqprio(vcpu
,
1262 BOOK3S_INTERRUPT_DATA_SEGMENT
);
1266 case BOOK3S_INTERRUPT_INST_SEGMENT
:
1267 if (kvmppc_mmu_map_segment(vcpu
, kvmppc_get_pc(vcpu
)) < 0) {
1268 kvmppc_book3s_queue_irqprio(vcpu
,
1269 BOOK3S_INTERRUPT_INST_SEGMENT
);
1273 /* We're good on these - the host merely wanted to get our attention */
1274 case BOOK3S_INTERRUPT_DECREMENTER
:
1275 case BOOK3S_INTERRUPT_HV_DECREMENTER
:
1276 case BOOK3S_INTERRUPT_DOORBELL
:
1277 case BOOK3S_INTERRUPT_H_DOORBELL
:
1278 vcpu
->stat
.dec_exits
++;
1281 case BOOK3S_INTERRUPT_EXTERNAL
:
1282 case BOOK3S_INTERRUPT_EXTERNAL_HV
:
1283 case BOOK3S_INTERRUPT_H_VIRT
:
1284 vcpu
->stat
.ext_intr_exits
++;
1287 case BOOK3S_INTERRUPT_HMI
:
1288 case BOOK3S_INTERRUPT_PERFMON
:
1289 case BOOK3S_INTERRUPT_SYSTEM_RESET
:
1292 case BOOK3S_INTERRUPT_PROGRAM
:
1293 case BOOK3S_INTERRUPT_H_EMUL_ASSIST
:
1294 r
= kvmppc_exit_pr_progint(vcpu
, exit_nr
);
1296 case BOOK3S_INTERRUPT_SYSCALL
:
1301 /* Get last sc for papr */
1302 if (vcpu
->arch
.papr_enabled
) {
1303 /* The sc instuction points SRR0 to the next inst */
1304 emul
= kvmppc_get_last_inst(vcpu
, INST_SC
, &last_sc
);
1305 if (emul
!= EMULATE_DONE
) {
1306 kvmppc_set_pc(vcpu
, kvmppc_get_pc(vcpu
) - 4);
1312 if (vcpu
->arch
.papr_enabled
&&
1313 (last_sc
== 0x44000022) &&
1314 !(kvmppc_get_msr(vcpu
) & MSR_PR
)) {
1315 /* SC 1 papr hypercalls */
1316 ulong cmd
= kvmppc_get_gpr(vcpu
, 3);
1319 #ifdef CONFIG_PPC_BOOK3S_64
1320 if (kvmppc_h_pr(vcpu
, cmd
) == EMULATE_DONE
) {
1326 run
->papr_hcall
.nr
= cmd
;
1327 for (i
= 0; i
< 9; ++i
) {
1328 ulong gpr
= kvmppc_get_gpr(vcpu
, 4 + i
);
1329 run
->papr_hcall
.args
[i
] = gpr
;
1331 run
->exit_reason
= KVM_EXIT_PAPR_HCALL
;
1332 vcpu
->arch
.hcall_needed
= 1;
1334 } else if (vcpu
->arch
.osi_enabled
&&
1335 (((u32
)kvmppc_get_gpr(vcpu
, 3)) == OSI_SC_MAGIC_R3
) &&
1336 (((u32
)kvmppc_get_gpr(vcpu
, 4)) == OSI_SC_MAGIC_R4
)) {
1337 /* MOL hypercalls */
1338 u64
*gprs
= run
->osi
.gprs
;
1341 run
->exit_reason
= KVM_EXIT_OSI
;
1342 for (i
= 0; i
< 32; i
++)
1343 gprs
[i
] = kvmppc_get_gpr(vcpu
, i
);
1344 vcpu
->arch
.osi_needed
= 1;
1346 } else if (!(kvmppc_get_msr(vcpu
) & MSR_PR
) &&
1347 (((u32
)kvmppc_get_gpr(vcpu
, 0)) == KVM_SC_MAGIC_R0
)) {
1348 /* KVM PV hypercalls */
1349 kvmppc_set_gpr(vcpu
, 3, kvmppc_kvm_pv(vcpu
));
1352 /* Guest syscalls */
1353 vcpu
->stat
.syscall_exits
++;
1354 kvmppc_book3s_queue_irqprio(vcpu
, exit_nr
);
1359 case BOOK3S_INTERRUPT_FP_UNAVAIL
:
1360 case BOOK3S_INTERRUPT_ALTIVEC
:
1361 case BOOK3S_INTERRUPT_VSX
:
1367 if (vcpu
->arch
.hflags
& BOOK3S_HFLAG_PAIRED_SINGLE
) {
1368 /* Do paired single instruction emulation */
1369 emul
= kvmppc_get_last_inst(vcpu
, INST_GENERIC
,
1371 if (emul
== EMULATE_DONE
)
1372 r
= kvmppc_exit_pr_progint(vcpu
, exit_nr
);
1379 /* Enable external provider */
1381 case BOOK3S_INTERRUPT_FP_UNAVAIL
:
1385 case BOOK3S_INTERRUPT_ALTIVEC
:
1389 case BOOK3S_INTERRUPT_VSX
:
1394 r
= kvmppc_handle_ext(vcpu
, exit_nr
, ext_msr
);
1397 case BOOK3S_INTERRUPT_ALIGNMENT
:
1400 int emul
= kvmppc_get_last_inst(vcpu
, INST_GENERIC
, &last_inst
);
1402 if (emul
== EMULATE_DONE
) {
1406 dsisr
= kvmppc_alignment_dsisr(vcpu
, last_inst
);
1407 dar
= kvmppc_alignment_dar(vcpu
, last_inst
);
1409 kvmppc_set_dsisr(vcpu
, dsisr
);
1410 kvmppc_set_dar(vcpu
, dar
);
1412 kvmppc_book3s_queue_irqprio(vcpu
, exit_nr
);
1417 #ifdef CONFIG_PPC_BOOK3S_64
1418 case BOOK3S_INTERRUPT_FAC_UNAVAIL
:
1419 r
= kvmppc_handle_fac(vcpu
, vcpu
->arch
.shadow_fscr
>> 56);
1422 case BOOK3S_INTERRUPT_MACHINE_CHECK
:
1423 kvmppc_book3s_queue_irqprio(vcpu
, exit_nr
);
1426 case BOOK3S_INTERRUPT_TRACE
:
1427 if (vcpu
->guest_debug
& KVM_GUESTDBG_SINGLESTEP
) {
1428 run
->exit_reason
= KVM_EXIT_DEBUG
;
1431 kvmppc_book3s_queue_irqprio(vcpu
, exit_nr
);
1437 ulong shadow_srr1
= vcpu
->arch
.shadow_srr1
;
1438 /* Ugh - bork here! What did we get? */
1439 printk(KERN_EMERG
"exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
1440 exit_nr
, kvmppc_get_pc(vcpu
), shadow_srr1
);
1447 if (!(r
& RESUME_HOST
)) {
1448 /* To avoid clobbering exit_reason, only check for signals if
1449 * we aren't already exiting to userspace for some other
1453 * Interrupts could be timers for the guest which we have to
1454 * inject again, so let's postpone them until we're in the guest
1455 * and if we really did time things so badly, then we just exit
1456 * again due to a host external interrupt.
1458 s
= kvmppc_prepare_to_enter(vcpu
);
1462 /* interrupts now hard-disabled */
1463 kvmppc_fix_ee_before_entry();
1466 kvmppc_handle_lost_ext(vcpu
);
1469 trace_kvm_book3s_reenter(r
, vcpu
);
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

*vcpu
,
1502 struct kvm_sregs
*sregs
)
1504 struct kvmppc_vcpu_book3s
*vcpu3s
= to_book3s(vcpu
);
1507 kvmppc_set_pvr_pr(vcpu
, sregs
->pvr
);
1509 vcpu3s
->sdr1
= sregs
->u
.s
.sdr1
;
1510 #ifdef CONFIG_PPC_BOOK3S_64
1511 if (vcpu
->arch
.hflags
& BOOK3S_HFLAG_SLB
) {
1512 /* Flush all SLB entries */
1513 vcpu
->arch
.mmu
.slbmte(vcpu
, 0, 0);
1514 vcpu
->arch
.mmu
.slbia(vcpu
);
1516 for (i
= 0; i
< 64; i
++) {
1517 u64 rb
= sregs
->u
.s
.ppc64
.slb
[i
].slbe
;
1518 u64 rs
= sregs
->u
.s
.ppc64
.slb
[i
].slbv
;
1520 if (rb
& SLB_ESID_V
)
1521 vcpu
->arch
.mmu
.slbmte(vcpu
, rs
, rb
);
1526 for (i
= 0; i
< 16; i
++) {
1527 vcpu
->arch
.mmu
.mtsrin(vcpu
, i
, sregs
->u
.s
.ppc32
.sr
[i
]);
1529 for (i
= 0; i
< 8; i
++) {
1530 kvmppc_set_bat(vcpu
, &(vcpu3s
->ibat
[i
]), false,
1531 (u32
)sregs
->u
.s
.ppc32
.ibat
[i
]);
1532 kvmppc_set_bat(vcpu
, &(vcpu3s
->ibat
[i
]), true,
1533 (u32
)(sregs
->u
.s
.ppc32
.ibat
[i
] >> 32));
1534 kvmppc_set_bat(vcpu
, &(vcpu3s
->dbat
[i
]), false,
1535 (u32
)sregs
->u
.s
.ppc32
.dbat
[i
]);
1536 kvmppc_set_bat(vcpu
, &(vcpu3s
->dbat
[i
]), true,
1537 (u32
)(sregs
->u
.s
.ppc32
.dbat
[i
] >> 32));
1541 /* Flush the MMU after messing with the segments */
1542 kvmppc_mmu_pte_flush(vcpu
, 0, 0);
1547 static int kvmppc_get_one_reg_pr(struct kvm_vcpu
*vcpu
, u64 id
,
1548 union kvmppc_one_reg
*val
)
1553 case KVM_REG_PPC_DEBUG_INST
:
1554 *val
= get_reg_val(id
, KVMPPC_INST_SW_BREAKPOINT
);
1556 case KVM_REG_PPC_HIOR
:
1557 *val
= get_reg_val(id
, to_book3s(vcpu
)->hior
);
1559 case KVM_REG_PPC_VTB
:
1560 *val
= get_reg_val(id
, to_book3s(vcpu
)->vtb
);
1562 case KVM_REG_PPC_LPCR
:
1563 case KVM_REG_PPC_LPCR_64
:
1565 * We are only interested in the LPCR_ILE bit
1567 if (vcpu
->arch
.intr_msr
& MSR_LE
)
1568 *val
= get_reg_val(id
, LPCR_ILE
);
1570 *val
= get_reg_val(id
, 0);
1572 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1573 case KVM_REG_PPC_TFHAR
:
1574 *val
= get_reg_val(id
, vcpu
->arch
.tfhar
);
1576 case KVM_REG_PPC_TFIAR
:
1577 *val
= get_reg_val(id
, vcpu
->arch
.tfiar
);
1579 case KVM_REG_PPC_TEXASR
:
1580 *val
= get_reg_val(id
, vcpu
->arch
.texasr
);
1582 case KVM_REG_PPC_TM_GPR0
... KVM_REG_PPC_TM_GPR31
:
1583 *val
= get_reg_val(id
,
1584 vcpu
->arch
.gpr_tm
[id
-KVM_REG_PPC_TM_GPR0
]);
1586 case KVM_REG_PPC_TM_VSR0
... KVM_REG_PPC_TM_VSR63
:
1590 i
= id
- KVM_REG_PPC_TM_VSR0
;
1592 for (j
= 0; j
< TS_FPRWIDTH
; j
++)
1593 val
->vsxval
[j
] = vcpu
->arch
.fp_tm
.fpr
[i
][j
];
1595 if (cpu_has_feature(CPU_FTR_ALTIVEC
))
1596 val
->vval
= vcpu
->arch
.vr_tm
.vr
[i
-32];
1602 case KVM_REG_PPC_TM_CR
:
1603 *val
= get_reg_val(id
, vcpu
->arch
.cr_tm
);
1605 case KVM_REG_PPC_TM_XER
:
1606 *val
= get_reg_val(id
, vcpu
->arch
.xer_tm
);
1608 case KVM_REG_PPC_TM_LR
:
1609 *val
= get_reg_val(id
, vcpu
->arch
.lr_tm
);
1611 case KVM_REG_PPC_TM_CTR
:
1612 *val
= get_reg_val(id
, vcpu
->arch
.ctr_tm
);
1614 case KVM_REG_PPC_TM_FPSCR
:
1615 *val
= get_reg_val(id
, vcpu
->arch
.fp_tm
.fpscr
);
1617 case KVM_REG_PPC_TM_AMR
:
1618 *val
= get_reg_val(id
, vcpu
->arch
.amr_tm
);
1620 case KVM_REG_PPC_TM_PPR
:
1621 *val
= get_reg_val(id
, vcpu
->arch
.ppr_tm
);
1623 case KVM_REG_PPC_TM_VRSAVE
:
1624 *val
= get_reg_val(id
, vcpu
->arch
.vrsave_tm
);
1626 case KVM_REG_PPC_TM_VSCR
:
1627 if (cpu_has_feature(CPU_FTR_ALTIVEC
))
1628 *val
= get_reg_val(id
, vcpu
->arch
.vr_tm
.vscr
.u
[3]);
1632 case KVM_REG_PPC_TM_DSCR
:
1633 *val
= get_reg_val(id
, vcpu
->arch
.dscr_tm
);
1635 case KVM_REG_PPC_TM_TAR
:
1636 *val
= get_reg_val(id
, vcpu
->arch
.tar_tm
);
static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
			set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_XER:
		vcpu->arch.xer_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

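/*
 * Allocate the per-vcpu PR state: the book3s struct, the optional 32-bit
 * shadow vcpu and the shared page used for paravirtual communication with
 * the guest, then pick a default PVR and MMU model.
 */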
static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	unsigned long p;
	int err;

	err = -ENOMEM;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto free_shadow_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
	vcpu->arch.intr_msr = 0;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init_pr(vcpu);
	if (err < 0)
		goto free_shared_page;

	return 0;

free_shared_page:
	free_page((unsigned long)vcpu->arch.shared);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
out:
	return err;
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	kvmppc_mmu_destroy_pr(vcpu);
	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
}

static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
{
	int ret;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(vcpu);

	kvmppc_clear_debug(vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot)
{
	return;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}

static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;
	/* Require flags and process table base and size to all be zero. */
	if (cfg->flags || cfg->process_table)
		return -EINVAL;
	return 0;
}

#else

static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
	return 0;
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * PR KVM can work on POWER9 inside a guest partition
	 * running in HPT mode.  It can't work if we are using
	 * radix translation (because radix provides no way for
	 * a process to have unique translations in quadrant 3).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.inject_interrupt = kvmppc_inject_interrupt_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
	.configure_mmu = kvm_configure_mmu_pr,
#endif
	.giveup_ext = kvmppc_giveup_ext,
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif