/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

/* for the trace_kvm_book3s_* tracepoints used below */
#include "trace.h"
/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif
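/*
 * vcpu load/put: mirror the shadow SLB and shadow vcpu state between the
 * per-vcpu book3s structure and the per-CPU PACA (64-bit) or the current
 * thread (32-bit) whenever the vcpu is scheduled in or out.
 */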
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}
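/*
 * Recompute the MSR the guest actually runs with: keep the guest-controlled
 * FE0/FE1/SF/SE/BE/DE bits, force the bits the host needs (ME, RI, IR, DR,
 * PR, EE), and re-enable whichever FP/VEC/VSX facilities the guest
 * currently owns.
 */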
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}
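/*
 * Guest MSR writes funnel through here: mask off bits the virtual CPU does
 * not support, recompute the shadow MSR, handle the power-saving case, and
 * remap segments if the translation mode (PR/IR/DR) changed.
 */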
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
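/*
 * Setting the PVR picks the guest MMU model (32-bit segments vs. 64-bit
 * SLB), HIOR and MSR mask, plus per-CPU quirks: 32-byte dcbz handling and,
 * on Gekko/Broadway-class hosts, native paired-single support.
 */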
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
		break;
	}
}
/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 bytes dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
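/*
 * The patching below clears one bit of the dcbz encoding
 * (page[i] &= 0xfffffff7), turning it into an illegal instruction. Executing
 * the patched word traps with a program interrupt, where the exit handler
 * recognises ((inst & 0xff0007ff) == (INS_DCBZ & 0xfffffff7)) and emulates a
 * 32-byte dcbz instead.
 */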
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage)) {
		kvm_release_page_clean(hpage);
		return;
	}

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage, KM_USER0);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page, KM_USER0);
	put_page(hpage);
}
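/*
 * A gfn is treated as visible if it matches the guest's magic (paravirt
 * shared) page or if it is backed by a memslot.
 */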
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
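/*
 * Guest page fault path: translate the effective address through the guest
 * MMU (or map 1:1 when translation is off), then either install the mapping
 * on the host, reflect the fault back into the guest, or fall through to
 * MMIO emulation.
 */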
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr =
			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}
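/*
 * The FP/VEC/VSX copy loops below index the thread's FP register array via
 * get_fpr_index(): with VSX configured, FPR i lives at thread_fpr[2 * i] and
 * the second half of the corresponding VSR at thread_fpr[2 * i + 1];
 * without VSX the mapping is simply 1:1.
 */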
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}
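/*
 * Re-read the current instruction through the guest MMU. If the page is not
 * mapped on the guest side, synthesize an instruction storage interrupt for
 * the guest and ask the caller to retry later.
 */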
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}
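/*
 * Main exit dispatcher: runs after every guest exit, sorts the exit by
 * interrupt vector, and returns RESUME_GUEST to re-enter the guest or a
 * RESUME_HOST value to drop back into the host/userspace.
 */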
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	trace_kvm_book3s_exit(exit_nr, vcpu);

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
		    == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			break;
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |=
				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* The only case we need to handle is missing shadow PTEs */
		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
		default:
			/* nothing to worry about - go again */
			r = RESUME_GUEST;
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
		r = RESUME_HOST;
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now. */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
						    sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}
int kvmppc_core_check_processor_compat(void)
{
	return 0;
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
	if (!p)
		goto uninit_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
	vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
#else
	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
#endif

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}
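/*
 * Outer run loop: park the host FPU/Altivec/VSX state on the stack,
 * optionally preload the guest FPU, enter the guest via __kvmppc_vcpu_run(),
 * then reclaim the guest's FP/VEC/VSX state and restore the host's.
 */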
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

	return ret;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);