/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",  VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",     VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",          VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll",  VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll",   VCPU_STAT(halt_attempted_poll) },
	{ "halt_successful_wait",  VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid",     VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",     VCPU_STAT(pthru_all) },
	{ "pthru_host",    VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};
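/*
 * While a guest runs in "split real mode" (instruction relocation off,
 * data relocation on), its program counter is offset into the
 * SPLIT_HACK region by the fixup side in the PR code.  This undoes the
 * offset on the guest PC and drops the hflag once the hack is no
 * longer active.
 */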
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
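/*
 * PR guests may relocate their interrupt vectors via HIOR; HV guests
 * always take interrupts at the architected offsets, so the base is 0.
 */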
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}
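/*
 * Paravirtual critical sections: a PR guest stores r1 into the shared
 * page's critical field while it must not be interrupted.  Delivery of
 * maskable interrupts is deferred while crit == r1 and the guest is in
 * supervisor mode.
 */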
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
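/*
 * Deliver an interrupt the way the hardware would: save PC and MSR into
 * SRR0/SRR1, branch to the (possibly HIOR-relocated) vector, and let the
 * MMU callback recompute the MSR for interrupt context.
 */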
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
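/*
 * Map an interrupt vector to its bit in vcpu->arch.pending_exceptions.
 * Unknown vectors fold into BOOK3S_IRQPRIO_MAX, which the delivery loop
 * below never services.
 */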
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;   break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;  break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;   break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;   break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;   break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;   break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;       break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;      break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;        break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;     break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;    break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;        break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;          break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;        break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;            break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;    break;
	default:    prio = BOOK3S_IRQPRIO_MAX;            break;
	}

	return prio;
}
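/*
 * Pending exceptions are tracked as single bits in one unsigned long;
 * a lower bit number means a higher delivery priority.
 */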
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);	/* used by kvm_hv */
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}
/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}
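/*
 * Scan the pending bitmap in priority order (lowest set bit first) and
 * deliver what the guest can currently take.  A delivered exception is
 * dropped from the bitmap only if it is not level-driven; DEC and
 * external-level interrupts stay set until their source clears them.
 */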
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
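/*
 * Translate a guest physical address to a host pfn.  The magic
 * (paravirt shared) page is special: it is backed by the kernel page
 * holding vcpu->arch.shared, not by a memslot.
 */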
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
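/*
 * Address translation for the guest: with relocation on, defer to the
 * subarch MMU; with relocation off, addresses map 1:1 (masked by
 * KVM_PAM) onto VSID_REAL pages, modulo the split real mode fixup.
 */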
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
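/*
 * ONE_REG access: the subarch (PR or HV) gets first pick via kvm_ops;
 * a return of -EINVAL means "not mine", in which case the registers
 * common to all Book3S flavours are handled here.
 */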
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
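/*
 * Everything below is a thin wrapper that dispatches through
 * kvm->arch.kvm_ops, so the PR and HV implementations can be built as
 * separate modules and selected per VM.
 */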
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}
int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}
void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
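/*
 * H_LOGICAL_CI_LOAD and H_LOGICAL_CI_STORE implement cache-inhibited
 * (MMIO) access for PAPR guests by forwarding to the in-kernel MMIO
 * bus.  Anything but a power-of-2 access of at most 8 bytes returns
 * H_TOO_HARD.
 */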
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV module.
	 */
	return 0;
}
int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}
module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif