/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

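/*
 * VCPU exit/event counters exported through debugfs; each entry maps a
 * name to a counter in struct kvm_vcpu_stat.
 */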
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

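/*
 * The debug-state load hooks below are intentionally empty on book3s;
 * they appear to exist only to satisfy the common powerpc KVM
 * interface.
 */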
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

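/*
 * Split-real-mode handling (inferred from the flag and mask names):
 * while BOOK3S_HFLAG_SPLIT_HACK is set, guest addresses in the
 * SPLIT_HACK_OFFS window carry an artificial offset, which this helper
 * strips from the PC before clearing the flag.
 */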
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

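/*
 * Mirror the pending-interrupt state into the guest-visible shared
 * (magic) page for PR guests; HV guests receive real interrupts, so
 * there is nothing to track. (The shared-page detail is an assumption
 * based on kvmppc_set_int_pending.)
 */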
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

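/*
 * A PR guest signals a "critical section" by storing its r1 into the
 * shared-page critical field (see kvmppc_get_critical); while that
 * value matches r1 and the guest is in supervisor mode, delivery of
 * maskable interrupts is deferred.
 */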
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

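/*
 * Deliver an interrupt immediately: stash PC/MSR into SRR0/SRR1, jump
 * to the vector (offset by HIOR for PR guests) and let the MMU backend
 * recompute the MSR for interrupt entry.
 */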
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

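/*
 * Pending exceptions are a priority bitmap (BOOK3S_IRQPRIO_*); lower
 * bit numbers are delivered first by kvmppc_core_prepare_to_enter().
 */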
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

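/*
 * KVM_INTERRUPT_SET_LEVEL selects the level-triggered external
 * interrupt variant, which remains pending until userspace clears it
 * (see clear_irqprio()); dequeueing drops both variants.
 */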
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

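/*
 * Of the priorities above, only the decrementer and external
 * interrupts are gated on MSR_EE and the critical-section check; all
 * other exceptions are delivered as soon as their priority comes up.
 */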
/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

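/*
 * GPA->PFN translation, with a special case for the paravirt "magic
 * page": if the guest physical address matches magic_page_pa, the pfn
 * of the host page backing vcpu->arch.shared is returned instead of
 * doing a memslot lookup.
 */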
pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

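/*
 * Address translation entry point: with relocation on (MSR_IR/MSR_DR)
 * the flavour-specific MMU backend does the walk; in real mode the
 * effective address maps 1:1 (masked by KVM_PAM) with all permissions
 * granted.
 */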
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

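/*
 * Fetch the faulting guest instruction. For a system call (INST_SC)
 * the saved PC points past the sc instruction, hence the 4-byte
 * rewind before the load.
 */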
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

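/*
 * ONE_REG accessors: the PR/HV backend gets first pick via kvm_ops;
 * a return of -EINVAL falls through to the generic book3s registers
 * handled below.
 */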
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST: {
			u32 opcode = INS_TW;
			r = copy_to_user((u32 __user *)(long)reg->addr,
					 &opcode, sizeof(u32));
			break;
		}
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			val = get_reg_val(reg->id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			val = get_reg_val(reg->id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			val = get_reg_val(reg->id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			val = get_reg_val(reg->id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			val = get_reg_val(reg->id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			val = get_reg_val(reg->id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			val = get_reg_val(reg->id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}
	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(reg->id, val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(reg->id, val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

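/*
 * From here on, most entry points are thin wrappers that dispatch
 * through kvm->arch.kvm_ops to the backend (book3s_pr or book3s_hv)
 * selected when the VM was created.
 */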
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->age_hva(kvm, hva);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

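/*
 * The MMU-notifier hooks above (unmap/age/test_age/set_spte) are also
 * forwarded to the active backend, which owns the guest mappings.
 */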
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

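/*
 * Module plumbing: register with the generic KVM core; when
 * CONFIG_KVM_BOOK3S_32_HANDLER is set (32-bit), the PR backend is
 * built into this module and initialised here directly.
 */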
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif